Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--   drivers/scsi/lpfc/Makefile            5
-rw-r--r--   drivers/scsi/lpfc/lpfc.h            358
-rw-r--r--   drivers/scsi/lpfc/lpfc_attr.c       760
-rw-r--r--   drivers/scsi/lpfc/lpfc_crtn.h       182
-rw-r--r--   drivers/scsi/lpfc/lpfc_ct.c         971
-rw-r--r--   drivers/scsi/lpfc/lpfc_debugfs.c    508
-rw-r--r--   drivers/scsi/lpfc/lpfc_debugfs.h     50
-rw-r--r--   drivers/scsi/lpfc/lpfc_disc.h        15
-rw-r--r--   drivers/scsi/lpfc/lpfc_els.c       3377
-rw-r--r--   drivers/scsi/lpfc/lpfc_hbadisc.c   2262
-rw-r--r--   drivers/scsi/lpfc/lpfc_hw.h         558
-rw-r--r--   drivers/scsi/lpfc/lpfc_init.c       948
-rw-r--r--   drivers/scsi/lpfc/lpfc_logmsg.h       1
-rw-r--r--   drivers/scsi/lpfc/lpfc_mbox.c       306
-rw-r--r--   drivers/scsi/lpfc/lpfc_mem.c        101
-rw-r--r--   drivers/scsi/lpfc/lpfc_nportdisc.c 1325
-rw-r--r--   drivers/scsi/lpfc/lpfc_scsi.c       557
-rw-r--r--   drivers/scsi/lpfc/lpfc_scsi.h         3
-rw-r--r--   drivers/scsi/lpfc/lpfc_sli.c       2047
-rw-r--r--   drivers/scsi/lpfc/lpfc_sli.h         47
-rw-r--r--   drivers/scsi/lpfc/lpfc_version.h      2
-rw-r--r--   drivers/scsi/lpfc/lpfc_vport.c      523
-rw-r--r--   drivers/scsi/lpfc/lpfc_vport.h      113
23 files changed, 10453 insertions, 4566 deletions
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index d1be465d5f55..1c286707dd5f 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
 #/*******************************************************************
 # * This file is part of the Emulex Linux Device Driver for *
 # * Fibre Channel Host Bus Adapters. *
-# * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+# * Copyright (C) 2004-2006 Emulex. All rights reserved. *
 # * EMULEX and SLI are trademarks of Emulex. *
 # * www.emulex.com *
 # * *
@@ -27,4 +27,5 @@ endif
 obj-$(CONFIG_SCSI_LPFC) := lpfc.o
 
 lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
-	lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o
+	lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
+	lpfc_vport.o lpfc_debugfs.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 82e8f90c4617..f8f64d6485cd 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -19,8 +19,9 @@
19 * included with this package. * 19 * included with this package. *
20 *******************************************************************/ 20 *******************************************************************/
21 21
22struct lpfc_sli2_slim; 22#include <scsi/scsi_host.h>
23 23
24struct lpfc_sli2_slim;
24 25
25#define LPFC_MAX_TARGET 256 /* max number of targets supported */ 26#define LPFC_MAX_TARGET 256 /* max number of targets supported */
26#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els 27#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
@@ -32,6 +33,20 @@ struct lpfc_sli2_slim;
32#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 33#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
33#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 34#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
34 35
36/*
37 * Following time intervals are used of adjusting SCSI device
38 * queue depths when there are driver resource error or Firmware
39 * resource error.
40 */
41#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */
42#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */
43
44/* Number of exchanges reserved for discovery to complete */
45#define LPFC_DISC_IOCB_BUFF_COUNT 20
46
47#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
48#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
49
35/* Define macros for 64 bit support */ 50/* Define macros for 64 bit support */
36#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr))) 51#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
37#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32))) 52#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
@@ -61,6 +76,11 @@ struct lpfc_dma_pool {
61 uint32_t current_count; 76 uint32_t current_count;
62}; 77};
63 78
79struct hbq_dmabuf {
80 struct lpfc_dmabuf dbuf;
81 uint32_t tag;
82};
83
64/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ 84/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
65#define MEM_PRI 0x100 85#define MEM_PRI 0x100
66 86
@@ -90,6 +110,29 @@ typedef struct lpfc_vpd {
90 uint32_t sli2FwRev; 110 uint32_t sli2FwRev;
91 uint8_t sli2FwName[16]; 111 uint8_t sli2FwName[16];
92 } rev; 112 } rev;
113 struct {
114#ifdef __BIG_ENDIAN_BITFIELD
115 uint32_t rsvd2 :24; /* Reserved */
116 uint32_t cmv : 1; /* Configure Max VPIs */
117 uint32_t ccrp : 1; /* Config Command Ring Polling */
118 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
119 uint32_t chbs : 1; /* Cofigure Host Backing store */
120 uint32_t cinb : 1; /* Enable Interrupt Notification Block */
121 uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
122 uint32_t cmx : 1; /* Configure Max XRIs */
123 uint32_t cmr : 1; /* Configure Max RPIs */
124#else /* __LITTLE_ENDIAN */
125 uint32_t cmr : 1; /* Configure Max RPIs */
126 uint32_t cmx : 1; /* Configure Max XRIs */
127 uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
128 uint32_t cinb : 1; /* Enable Interrupt Notification Block */
129 uint32_t chbs : 1; /* Cofigure Host Backing store */
130 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
131 uint32_t ccrp : 1; /* Config Command Ring Polling */
132 uint32_t cmv : 1; /* Configure Max VPIs */
133 uint32_t rsvd2 :24; /* Reserved */
134#endif
135 } sli3Feat;
93} lpfc_vpd_t; 136} lpfc_vpd_t;
94 137
95struct lpfc_scsi_buf; 138struct lpfc_scsi_buf;
@@ -122,6 +165,7 @@ struct lpfc_stats {
122 uint32_t elsRcvRPS; 165 uint32_t elsRcvRPS;
123 uint32_t elsRcvRPL; 166 uint32_t elsRcvRPL;
124 uint32_t elsXmitFLOGI; 167 uint32_t elsXmitFLOGI;
168 uint32_t elsXmitFDISC;
125 uint32_t elsXmitPLOGI; 169 uint32_t elsXmitPLOGI;
126 uint32_t elsXmitPRLI; 170 uint32_t elsXmitPRLI;
127 uint32_t elsXmitADISC; 171 uint32_t elsXmitADISC;
@@ -165,50 +209,186 @@ struct lpfc_sysfs_mbox {
165 struct lpfcMboxq * mbox; 209 struct lpfcMboxq * mbox;
166}; 210};
167 211
212struct lpfc_hba;
213
214
215enum discovery_state {
216 LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */
217 LPFC_VPORT_FAILED = 1, /* vport has failed */
218 LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */
219 LPFC_FLOGI = 7, /* FLOGI sent to Fabric */
220 LPFC_FDISC = 8, /* FDISC sent for vport */
221 LPFC_FABRIC_CFG_LINK = 9, /* Fabric assigned NPORT Id
222 * configured */
223 LPFC_NS_REG = 10, /* Register with NameServer */
224 LPFC_NS_QRY = 11, /* Query NameServer for NPort ID list */
225 LPFC_BUILD_DISC_LIST = 12, /* Build ADISC and PLOGI lists for
226 * device authentication / discovery */
227 LPFC_DISC_AUTH = 13, /* Processing ADISC list */
228 LPFC_VPORT_READY = 32,
229};
230
231enum hba_state {
232 LPFC_LINK_UNKNOWN = 0, /* HBA state is unknown */
233 LPFC_WARM_START = 1, /* HBA state after selective reset */
234 LPFC_INIT_START = 2, /* Initial state after board reset */
235 LPFC_INIT_MBX_CMDS = 3, /* Initialize HBA with mbox commands */
236 LPFC_LINK_DOWN = 4, /* HBA initialized, link is down */
237 LPFC_LINK_UP = 5, /* Link is up - issue READ_LA */
238 LPFC_CLEAR_LA = 6, /* authentication cmplt - issue
239 * CLEAR_LA */
240 LPFC_HBA_READY = 32,
241 LPFC_HBA_ERROR = -1
242};
243
244struct lpfc_vport {
245 struct list_head listentry;
246 struct lpfc_hba *phba;
247 uint8_t port_type;
248#define LPFC_PHYSICAL_PORT 1
249#define LPFC_NPIV_PORT 2
250#define LPFC_FABRIC_PORT 3
251 enum discovery_state port_state;
252
253 uint16_t vpi;
254
255 uint32_t fc_flag; /* FC flags */
256/* Several of these flags are HBA centric and should be moved to
257 * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
258 */
259#define FC_PT2PT 0x1 /* pt2pt with no fabric */
260#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
261#define FC_DISC_TMO 0x4 /* Discovery timer running */
262#define FC_PUBLIC_LOOP 0x8 /* Public loop */
263#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
264#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
265#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
266#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
267#define FC_FABRIC 0x100 /* We are fabric attached */
268#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
269#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
270#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
271#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
272#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
273#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
274#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */
275#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
276#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
277
278 struct list_head fc_nodes;
279
280 /* Keep counters for the number of entries in each list. */
281 uint16_t fc_plogi_cnt;
282 uint16_t fc_adisc_cnt;
283 uint16_t fc_reglogin_cnt;
284 uint16_t fc_prli_cnt;
285 uint16_t fc_unmap_cnt;
286 uint16_t fc_map_cnt;
287 uint16_t fc_npr_cnt;
288 uint16_t fc_unused_cnt;
289 struct serv_parm fc_sparam; /* buffer for our service parameters */
290
291 uint32_t fc_myDID; /* fibre channel S_ID */
292 uint32_t fc_prevDID; /* previous fibre channel S_ID */
293
294 int32_t stopped; /* HBA has not been restarted since last ERATT */
295 uint8_t fc_linkspeed; /* Link speed after last READ_LA */
296
297 uint32_t num_disc_nodes; /*in addition to hba_state */
298
299 uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
300 uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
301 struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
302 struct lpfc_name fc_nodename; /* fc nodename */
303 struct lpfc_name fc_portname; /* fc portname */
304
305 struct lpfc_work_evt disc_timeout_evt;
306
307 struct timer_list fc_disctmo; /* Discovery rescue timer */
308 uint8_t fc_ns_retry; /* retries for fabric nameserver */
309 uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
310
311 spinlock_t work_port_lock;
312 uint32_t work_port_events; /* Timeout to be handled */
313#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */
314#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */
315#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */
316
317#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
318#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
319#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timout */
320#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
321#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
322
323 struct timer_list fc_fdmitmo;
324 struct timer_list els_tmofunc;
325
326 int unreg_vpi_cmpl;
327
328 uint8_t load_flag;
329#define FC_LOADING 0x1 /* HBA in process of loading drvr */
330#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
331 char *vname; /* Application assigned name */
332 struct fc_vport *fc_vport;
333
334#ifdef CONFIG_LPFC_DEBUG_FS
335 struct dentry *debug_disc_trc;
336 struct dentry *debug_nodelist;
337 struct dentry *vport_debugfs_root;
338 struct lpfc_disc_trc *disc_trc;
339 atomic_t disc_trc_cnt;
340#endif
341};
342
343struct hbq_s {
344 uint16_t entry_count; /* Current number of HBQ slots */
345 uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */
346 uint32_t hbqPutIdx; /* HBQ slot to use */
347 uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
348};
349
350#define LPFC_MAX_HBQS 16
351/* this matches the possition in the lpfc_hbq_defs array */
352#define LPFC_ELS_HBQ 0
353
168struct lpfc_hba { 354struct lpfc_hba {
169 struct lpfc_sli sli; 355 struct lpfc_sli sli;
356 uint32_t sli_rev; /* SLI2 or SLI3 */
357 uint32_t sli3_options; /* Mask of enabled SLI3 options */
358#define LPFC_SLI3_ENABLED 0x01
359#define LPFC_SLI3_HBQ_ENABLED 0x02
360#define LPFC_SLI3_NPIV_ENABLED 0x04
361#define LPFC_SLI3_VPORT_TEARDOWN 0x08
362 uint32_t iocb_cmd_size;
363 uint32_t iocb_rsp_size;
364
365 enum hba_state link_state;
366 uint32_t link_flag; /* link state flags */
367#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
368 /* This flag is set while issuing */
369 /* INIT_LINK mailbox command */
370#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
371#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */
372
170 struct lpfc_sli2_slim *slim2p; 373 struct lpfc_sli2_slim *slim2p;
374 struct lpfc_dmabuf hbqslimp;
375
171 dma_addr_t slim2p_mapping; 376 dma_addr_t slim2p_mapping;
377
172 uint16_t pci_cfg_value; 378 uint16_t pci_cfg_value;
173 379
174 int32_t hba_state; 380 uint8_t work_found;
175 381#define LPFC_MAX_WORKER_ITERATION 4
176#define LPFC_STATE_UNKNOWN 0 /* HBA state is unknown */
177#define LPFC_WARM_START 1 /* HBA state after selective reset */
178#define LPFC_INIT_START 2 /* Initial state after board reset */
179#define LPFC_INIT_MBX_CMDS 3 /* Initialize HBA with mbox commands */
180#define LPFC_LINK_DOWN 4 /* HBA initialized, link is down */
181#define LPFC_LINK_UP 5 /* Link is up - issue READ_LA */
182#define LPFC_LOCAL_CFG_LINK 6 /* local NPORT Id configured */
183#define LPFC_FLOGI 7 /* FLOGI sent to Fabric */
184#define LPFC_FABRIC_CFG_LINK 8 /* Fabric assigned NPORT Id
185 configured */
186#define LPFC_NS_REG 9 /* Register with NameServer */
187#define LPFC_NS_QRY 10 /* Query NameServer for NPort ID list */
188#define LPFC_BUILD_DISC_LIST 11 /* Build ADISC and PLOGI lists for
189 * device authentication / discovery */
190#define LPFC_DISC_AUTH 12 /* Processing ADISC list */
191#define LPFC_CLEAR_LA 13 /* authentication cmplt - issue
192 CLEAR_LA */
193#define LPFC_HBA_READY 32
194#define LPFC_HBA_ERROR -1
195 382
196 int32_t stopped; /* HBA has not been restarted since last ERATT */
197 uint8_t fc_linkspeed; /* Link speed after last READ_LA */ 383 uint8_t fc_linkspeed; /* Link speed after last READ_LA */
198 384
199 uint32_t fc_eventTag; /* event tag for link attention */ 385 uint32_t fc_eventTag; /* event tag for link attention */
200 uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
201 386
202 uint32_t num_disc_nodes; /*in addition to hba_state */
203 387
204 struct timer_list fc_estabtmo; /* link establishment timer */ 388 struct timer_list fc_estabtmo; /* link establishment timer */
205 struct timer_list fc_disctmo; /* Discovery rescue timer */
206 struct timer_list fc_fdmitmo; /* fdmi timer */
207 /* These fields used to be binfo */ 389 /* These fields used to be binfo */
208 struct lpfc_name fc_nodename; /* fc nodename */
209 struct lpfc_name fc_portname; /* fc portname */
210 uint32_t fc_pref_DID; /* preferred D_ID */ 390 uint32_t fc_pref_DID; /* preferred D_ID */
211 uint8_t fc_pref_ALPA; /* preferred AL_PA */ 391 uint8_t fc_pref_ALPA; /* preferred AL_PA */
212 uint32_t fc_edtov; /* E_D_TOV timer value */ 392 uint32_t fc_edtov; /* E_D_TOV timer value */
213 uint32_t fc_arbtov; /* ARB_TOV timer value */ 393 uint32_t fc_arbtov; /* ARB_TOV timer value */
214 uint32_t fc_ratov; /* R_A_TOV timer value */ 394 uint32_t fc_ratov; /* R_A_TOV timer value */
@@ -216,61 +396,21 @@ struct lpfc_hba {
216 uint32_t fc_altov; /* AL_TOV timer value */ 396 uint32_t fc_altov; /* AL_TOV timer value */
217 uint32_t fc_crtov; /* C_R_TOV timer value */ 397 uint32_t fc_crtov; /* C_R_TOV timer value */
218 uint32_t fc_citov; /* C_I_TOV timer value */ 398 uint32_t fc_citov; /* C_I_TOV timer value */
219 uint32_t fc_myDID; /* fibre channel S_ID */
220 uint32_t fc_prevDID; /* previous fibre channel S_ID */
221 399
222 struct serv_parm fc_sparam; /* buffer for our service parameters */
223 struct serv_parm fc_fabparam; /* fabric service parameters buffer */ 400 struct serv_parm fc_fabparam; /* fabric service parameters buffer */
224 uint8_t alpa_map[128]; /* AL_PA map from READ_LA */ 401 uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
225 402
226 uint8_t fc_ns_retry; /* retries for fabric nameserver */
227 uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
228 uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
229 struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
230 uint32_t lmt; 403 uint32_t lmt;
231 uint32_t fc_flag; /* FC flags */
232#define FC_PT2PT 0x1 /* pt2pt with no fabric */
233#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
234#define FC_DISC_TMO 0x4 /* Discovery timer running */
235#define FC_PUBLIC_LOOP 0x8 /* Public loop */
236#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
237#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
238#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
239#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
240#define FC_FABRIC 0x100 /* We are fabric attached */
241#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
242#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
243#define FC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
244#define FC_LOADING 0x1000 /* HBA in process of loading drvr */
245#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */
246#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
247#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
248#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
249#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
250#define FC_LOOPBACK_MODE 0x40000 /* NPort is in Loopback mode */
251 /* This flag is set while issuing */
252 /* INIT_LINK mailbox command */
253#define FC_IGNORE_ERATT 0x80000 /* intr handler should ignore ERATT */
254 404
255 uint32_t fc_topology; /* link topology, from LINK INIT */ 405 uint32_t fc_topology; /* link topology, from LINK INIT */
256 406
257 struct lpfc_stats fc_stat; 407 struct lpfc_stats fc_stat;
258 408
259 struct list_head fc_nodes;
260
261 /* Keep counters for the number of entries in each list. */
262 uint16_t fc_plogi_cnt;
263 uint16_t fc_adisc_cnt;
264 uint16_t fc_reglogin_cnt;
265 uint16_t fc_prli_cnt;
266 uint16_t fc_unmap_cnt;
267 uint16_t fc_map_cnt;
268 uint16_t fc_npr_cnt;
269 uint16_t fc_unused_cnt;
270 struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */ 409 struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
271 uint32_t nport_event_cnt; /* timestamp for nlplist entry */ 410 uint32_t nport_event_cnt; /* timestamp for nlplist entry */
272 411
273 uint32_t wwnn[2]; 412 uint8_t wwnn[8];
413 uint8_t wwpn[8];
274 uint32_t RandomData[7]; 414 uint32_t RandomData[7];
275 415
276 uint32_t cfg_log_verbose; 416 uint32_t cfg_log_verbose;
@@ -278,6 +418,9 @@ struct lpfc_hba {
278 uint32_t cfg_nodev_tmo; 418 uint32_t cfg_nodev_tmo;
279 uint32_t cfg_devloss_tmo; 419 uint32_t cfg_devloss_tmo;
280 uint32_t cfg_hba_queue_depth; 420 uint32_t cfg_hba_queue_depth;
421 uint32_t cfg_peer_port_login;
422 uint32_t cfg_vport_restrict_login;
423 uint32_t cfg_npiv_enable;
281 uint32_t cfg_fcp_class; 424 uint32_t cfg_fcp_class;
282 uint32_t cfg_use_adisc; 425 uint32_t cfg_use_adisc;
283 uint32_t cfg_ack0; 426 uint32_t cfg_ack0;
@@ -304,22 +447,20 @@ struct lpfc_hba {
304 447
305 lpfc_vpd_t vpd; /* vital product data */ 448 lpfc_vpd_t vpd; /* vital product data */
306 449
307 struct Scsi_Host *host;
308 struct pci_dev *pcidev; 450 struct pci_dev *pcidev;
309 struct list_head work_list; 451 struct list_head work_list;
310 uint32_t work_ha; /* Host Attention Bits for WT */ 452 uint32_t work_ha; /* Host Attention Bits for WT */
311 uint32_t work_ha_mask; /* HA Bits owned by WT */ 453 uint32_t work_ha_mask; /* HA Bits owned by WT */
312 uint32_t work_hs; /* HS stored in case of ERRAT */ 454 uint32_t work_hs; /* HS stored in case of ERRAT */
313 uint32_t work_status[2]; /* Extra status from SLIM */ 455 uint32_t work_status[2]; /* Extra status from SLIM */
314 uint32_t work_hba_events; /* Timeout to be handled */
315#define WORKER_DISC_TMO 0x1 /* Discovery timeout */
316#define WORKER_ELS_TMO 0x2 /* ELS timeout */
317#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */
318#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */
319 456
320 wait_queue_head_t *work_wait; 457 wait_queue_head_t *work_wait;
321 struct task_struct *worker_thread; 458 struct task_struct *worker_thread;
322 459
460 struct list_head hbq_buffer_list;
461 uint32_t hbq_count; /* Count of configured HBQs */
462 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
463
323 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 464 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
324 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ 465 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
325 void __iomem *slim_memmap_p; /* Kernel memory mapped address for 466 void __iomem *slim_memmap_p; /* Kernel memory mapped address for
@@ -334,6 +475,10 @@ struct lpfc_hba {
334 reg */ 475 reg */
335 void __iomem *HCregaddr; /* virtual address for host ctl reg */ 476 void __iomem *HCregaddr; /* virtual address for host ctl reg */
336 477
478 struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
479 uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
480 uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
481
337 int brd_no; /* FC board number */ 482 int brd_no; /* FC board number */
338 483
339 char SerialNumber[32]; /* adapter Serial Number */ 484 char SerialNumber[32]; /* adapter Serial Number */
@@ -353,7 +498,6 @@ struct lpfc_hba {
353 uint8_t soft_wwn_enable; 498 uint8_t soft_wwn_enable;
354 499
355 struct timer_list fcp_poll_timer; 500 struct timer_list fcp_poll_timer;
356 struct timer_list els_tmofunc;
357 501
358 /* 502 /*
359 * stat counters 503 * stat counters
@@ -370,31 +514,69 @@ struct lpfc_hba {
370 uint32_t total_scsi_bufs; 514 uint32_t total_scsi_bufs;
371 struct list_head lpfc_iocb_list; 515 struct list_head lpfc_iocb_list;
372 uint32_t total_iocbq_bufs; 516 uint32_t total_iocbq_bufs;
517 spinlock_t hbalock;
373 518
374 /* pci_mem_pools */ 519 /* pci_mem_pools */
375 struct pci_pool *lpfc_scsi_dma_buf_pool; 520 struct pci_pool *lpfc_scsi_dma_buf_pool;
376 struct pci_pool *lpfc_mbuf_pool; 521 struct pci_pool *lpfc_mbuf_pool;
522 struct pci_pool *lpfc_hbq_pool;
377 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 523 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
378 524
379 mempool_t *mbox_mem_pool; 525 mempool_t *mbox_mem_pool;
380 mempool_t *nlp_mem_pool; 526 mempool_t *nlp_mem_pool;
381 527
382 struct fc_host_statistics link_stats; 528 struct fc_host_statistics link_stats;
529
530 struct list_head port_list;
531 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
532 uint16_t max_vpi; /* Maximum virtual nports */
533#define LPFC_MAX_VPI 100 /* Max number of VPorts supported */
534 unsigned long *vpi_bmask; /* vpi allocation table */
535
536 /* Data structure used by fabric iocb scheduler */
537 struct list_head fabric_iocb_list;
538 atomic_t fabric_iocb_count;
539 struct timer_list fabric_block_timer;
540 unsigned long bit_flags;
541#define FABRIC_COMANDS_BLOCKED 0
542 atomic_t num_rsrc_err;
543 atomic_t num_cmd_success;
544 unsigned long last_rsrc_error_time;
545 unsigned long last_ramp_down_time;
546 unsigned long last_ramp_up_time;
547#ifdef CONFIG_LPFC_DEBUG_FS
548 struct dentry *hba_debugfs_root;
549 atomic_t debugfs_vport_count;
550#endif
551
552 /* Fields used for heart beat. */
553 unsigned long last_completion_time;
554 struct timer_list hb_tmofunc;
555 uint8_t hb_outstanding;
383}; 556};
384 557
558static inline struct Scsi_Host *
559lpfc_shost_from_vport(struct lpfc_vport *vport)
560{
561 return container_of((void *) vport, struct Scsi_Host, hostdata[0]);
562}
563
385static inline void 564static inline void
386lpfc_set_loopback_flag(struct lpfc_hba *phba) { 565lpfc_set_loopback_flag(struct lpfc_hba *phba)
566{
387 if (phba->cfg_topology == FLAGS_LOCAL_LB) 567 if (phba->cfg_topology == FLAGS_LOCAL_LB)
388 phba->fc_flag |= FC_LOOPBACK_MODE; 568 phba->link_flag |= LS_LOOPBACK_MODE;
389 else 569 else
390 phba->fc_flag &= ~FC_LOOPBACK_MODE; 570 phba->link_flag &= ~LS_LOOPBACK_MODE;
391} 571}
392 572
393struct rnidrsp { 573static inline int
394 void *buf; 574lpfc_is_link_up(struct lpfc_hba *phba)
395 uint32_t uniqueid; 575{
396 struct list_head list; 576 return phba->link_state == LPFC_LINK_UP ||
397 uint32_t data; 577 phba->link_state == LPFC_CLEAR_LA ||
398}; 578 phba->link_state == LPFC_HBA_READY;
579}
399 580
400#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ 581#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
582
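The inline helpers just above are the pivot of the lpfc.h rework: per-port discovery state moves out of struct lpfc_hba into the new struct lpfc_vport, the vport lives in the Scsi_Host's driver-private hostdata area, and lpfc_shost_from_vport() walks back to the owning Scsi_Host with container_of(). The following standalone sketch shows that round trip with simplified mock structures; the layouts and the names ending in _mock are illustrative stand-ins, not the driver's real definitions.

/*
 * Standalone sketch (not kernel code) of the shost <-> vport mapping used
 * throughout this patch: the driver-private hostdata area of the Scsi_Host
 * holds the lpfc_vport, and the containing host is recovered with
 * container_of().  Struct layouts below are simplified mocks.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct lpfc_vport_mock { int vpi; };

struct scsi_host_mock {
	int host_no;
	unsigned long hostdata[4];	/* driver-private area, holds the vport */
};

/* same cast the new lpfc_attr.c show functions perform on shost->hostdata */
static struct lpfc_vport_mock *vport_of(struct scsi_host_mock *shost)
{
	return (struct lpfc_vport_mock *) shost->hostdata;
}

/* mirrors lpfc_shost_from_vport() from the hunk above */
static struct scsi_host_mock *shost_of(struct lpfc_vport_mock *vport)
{
	return container_of((void *) vport, struct scsi_host_mock, hostdata[0]);
}

int main(void)
{
	struct scsi_host_mock host = { .host_no = 0 };
	struct lpfc_vport_mock *vport = vport_of(&host);

	vport->vpi = 1;
	printf("round trip ok: %d, vpi %d\n", shost_of(vport) == &host, vport->vpi);
	return 0;
}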
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5dfda9778c80..860a52c090f4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -39,6 +39,7 @@
39#include "lpfc_version.h" 39#include "lpfc_version.h"
40#include "lpfc_compat.h" 40#include "lpfc_compat.h"
41#include "lpfc_crtn.h" 41#include "lpfc_crtn.h"
42#include "lpfc_vport.h"
42 43
43#define LPFC_DEF_DEVLOSS_TMO 30 44#define LPFC_DEF_DEVLOSS_TMO 30
44#define LPFC_MIN_DEVLOSS_TMO 1 45#define LPFC_MIN_DEVLOSS_TMO 1
@@ -76,116 +77,156 @@ static ssize_t
76lpfc_info_show(struct class_device *cdev, char *buf) 77lpfc_info_show(struct class_device *cdev, char *buf)
77{ 78{
78 struct Scsi_Host *host = class_to_shost(cdev); 79 struct Scsi_Host *host = class_to_shost(cdev);
80
79 return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host)); 81 return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
80} 82}
81 83
82static ssize_t 84static ssize_t
83lpfc_serialnum_show(struct class_device *cdev, char *buf) 85lpfc_serialnum_show(struct class_device *cdev, char *buf)
84{ 86{
85 struct Scsi_Host *host = class_to_shost(cdev); 87 struct Scsi_Host *shost = class_to_shost(cdev);
86 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 88 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
89 struct lpfc_hba *phba = vport->phba;
90
87 return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber); 91 return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
88} 92}
89 93
90static ssize_t 94static ssize_t
91lpfc_modeldesc_show(struct class_device *cdev, char *buf) 95lpfc_modeldesc_show(struct class_device *cdev, char *buf)
92{ 96{
93 struct Scsi_Host *host = class_to_shost(cdev); 97 struct Scsi_Host *shost = class_to_shost(cdev);
94 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 98 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
99 struct lpfc_hba *phba = vport->phba;
100
95 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc); 101 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
96} 102}
97 103
98static ssize_t 104static ssize_t
99lpfc_modelname_show(struct class_device *cdev, char *buf) 105lpfc_modelname_show(struct class_device *cdev, char *buf)
100{ 106{
101 struct Scsi_Host *host = class_to_shost(cdev); 107 struct Scsi_Host *shost = class_to_shost(cdev);
102 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 108 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
109 struct lpfc_hba *phba = vport->phba;
110
103 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName); 111 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
104} 112}
105 113
106static ssize_t 114static ssize_t
107lpfc_programtype_show(struct class_device *cdev, char *buf) 115lpfc_programtype_show(struct class_device *cdev, char *buf)
108{ 116{
109 struct Scsi_Host *host = class_to_shost(cdev); 117 struct Scsi_Host *shost = class_to_shost(cdev);
110 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 118 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
119 struct lpfc_hba *phba = vport->phba;
120
111 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType); 121 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
112} 122}
113 123
114static ssize_t 124static ssize_t
115lpfc_portnum_show(struct class_device *cdev, char *buf) 125lpfc_vportnum_show(struct class_device *cdev, char *buf)
116{ 126{
117 struct Scsi_Host *host = class_to_shost(cdev); 127 struct Scsi_Host *shost = class_to_shost(cdev);
118 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 128 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
129 struct lpfc_hba *phba = vport->phba;
130
119 return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port); 131 return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
120} 132}
121 133
122static ssize_t 134static ssize_t
123lpfc_fwrev_show(struct class_device *cdev, char *buf) 135lpfc_fwrev_show(struct class_device *cdev, char *buf)
124{ 136{
125 struct Scsi_Host *host = class_to_shost(cdev); 137 struct Scsi_Host *shost = class_to_shost(cdev);
126 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 138 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
139 struct lpfc_hba *phba = vport->phba;
127 char fwrev[32]; 140 char fwrev[32];
141
128 lpfc_decode_firmware_rev(phba, fwrev, 1); 142 lpfc_decode_firmware_rev(phba, fwrev, 1);
129 return snprintf(buf, PAGE_SIZE, "%s\n",fwrev); 143 return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
130} 144}
131 145
132static ssize_t 146static ssize_t
133lpfc_hdw_show(struct class_device *cdev, char *buf) 147lpfc_hdw_show(struct class_device *cdev, char *buf)
134{ 148{
135 char hdw[9]; 149 char hdw[9];
136 struct Scsi_Host *host = class_to_shost(cdev); 150 struct Scsi_Host *shost = class_to_shost(cdev);
137 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 151 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
152 struct lpfc_hba *phba = vport->phba;
138 lpfc_vpd_t *vp = &phba->vpd; 153 lpfc_vpd_t *vp = &phba->vpd;
154
139 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); 155 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
140 return snprintf(buf, PAGE_SIZE, "%s\n", hdw); 156 return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
141} 157}
142static ssize_t 158static ssize_t
143lpfc_option_rom_version_show(struct class_device *cdev, char *buf) 159lpfc_option_rom_version_show(struct class_device *cdev, char *buf)
144{ 160{
145 struct Scsi_Host *host = class_to_shost(cdev); 161 struct Scsi_Host *shost = class_to_shost(cdev);
146 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 162 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
163 struct lpfc_hba *phba = vport->phba;
164
147 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); 165 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
148} 166}
149static ssize_t 167static ssize_t
150lpfc_state_show(struct class_device *cdev, char *buf) 168lpfc_state_show(struct class_device *cdev, char *buf)
151{ 169{
152 struct Scsi_Host *host = class_to_shost(cdev); 170 struct Scsi_Host *shost = class_to_shost(cdev);
153 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 171 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
154 int len = 0; 172 struct lpfc_hba *phba = vport->phba;
155 switch (phba->hba_state) { 173 int len = 0;
156 case LPFC_STATE_UNKNOWN: 174
175 switch (phba->link_state) {
176 case LPFC_LINK_UNKNOWN:
157 case LPFC_WARM_START: 177 case LPFC_WARM_START:
158 case LPFC_INIT_START: 178 case LPFC_INIT_START:
159 case LPFC_INIT_MBX_CMDS: 179 case LPFC_INIT_MBX_CMDS:
160 case LPFC_LINK_DOWN: 180 case LPFC_LINK_DOWN:
181 case LPFC_HBA_ERROR:
161 len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n"); 182 len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
162 break; 183 break;
163 case LPFC_LINK_UP: 184 case LPFC_LINK_UP:
164 case LPFC_LOCAL_CFG_LINK:
165 len += snprintf(buf + len, PAGE_SIZE-len, "Link Up\n");
166 break;
167 case LPFC_FLOGI:
168 case LPFC_FABRIC_CFG_LINK:
169 case LPFC_NS_REG:
170 case LPFC_NS_QRY:
171 case LPFC_BUILD_DISC_LIST:
172 case LPFC_DISC_AUTH:
173 case LPFC_CLEAR_LA: 185 case LPFC_CLEAR_LA:
174 len += snprintf(buf + len, PAGE_SIZE-len,
175 "Link Up - Discovery\n");
176 break;
177 case LPFC_HBA_READY: 186 case LPFC_HBA_READY:
178 len += snprintf(buf + len, PAGE_SIZE-len, 187 len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n");
179 "Link Up - Ready:\n"); 188
189 switch (vport->port_state) {
190 len += snprintf(buf + len, PAGE_SIZE-len,
191 "initializing\n");
192 break;
193 case LPFC_LOCAL_CFG_LINK:
194 len += snprintf(buf + len, PAGE_SIZE-len,
195 "Configuring Link\n");
196 break;
197 case LPFC_FDISC:
198 case LPFC_FLOGI:
199 case LPFC_FABRIC_CFG_LINK:
200 case LPFC_NS_REG:
201 case LPFC_NS_QRY:
202 case LPFC_BUILD_DISC_LIST:
203 case LPFC_DISC_AUTH:
204 len += snprintf(buf + len, PAGE_SIZE - len,
205 "Discovery\n");
206 break;
207 case LPFC_VPORT_READY:
208 len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
209 break;
210
211 case LPFC_VPORT_FAILED:
212 len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
213 break;
214
215 case LPFC_VPORT_UNKNOWN:
216 len += snprintf(buf + len, PAGE_SIZE - len,
217 "Unknown\n");
218 break;
219 }
220
180 if (phba->fc_topology == TOPOLOGY_LOOP) { 221 if (phba->fc_topology == TOPOLOGY_LOOP) {
181 if (phba->fc_flag & FC_PUBLIC_LOOP) 222 if (vport->fc_flag & FC_PUBLIC_LOOP)
182 len += snprintf(buf + len, PAGE_SIZE-len, 223 len += snprintf(buf + len, PAGE_SIZE-len,
183 " Public Loop\n"); 224 " Public Loop\n");
184 else 225 else
185 len += snprintf(buf + len, PAGE_SIZE-len, 226 len += snprintf(buf + len, PAGE_SIZE-len,
186 " Private Loop\n"); 227 " Private Loop\n");
187 } else { 228 } else {
188 if (phba->fc_flag & FC_FABRIC) 229 if (vport->fc_flag & FC_FABRIC)
189 len += snprintf(buf + len, PAGE_SIZE-len, 230 len += snprintf(buf + len, PAGE_SIZE-len,
190 " Fabric\n"); 231 " Fabric\n");
191 else 232 else
@@ -193,29 +234,32 @@ lpfc_state_show(struct class_device *cdev, char *buf)
193 " Point-2-Point\n"); 234 " Point-2-Point\n");
194 } 235 }
195 } 236 }
237
196 return len; 238 return len;
197} 239}
198 240
199static ssize_t 241static ssize_t
200lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf) 242lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf)
201{ 243{
202 struct Scsi_Host *host = class_to_shost(cdev); 244 struct Scsi_Host *shost = class_to_shost(cdev);
203 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 245 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
204 return snprintf(buf, PAGE_SIZE, "%d\n", phba->fc_map_cnt + 246
205 phba->fc_unmap_cnt); 247 return snprintf(buf, PAGE_SIZE, "%d\n",
248 vport->fc_map_cnt + vport->fc_unmap_cnt);
206} 249}
207 250
208 251
209static int 252static int
210lpfc_issue_lip(struct Scsi_Host *host) 253lpfc_issue_lip(struct Scsi_Host *shost)
211{ 254{
212 struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata; 255 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
256 struct lpfc_hba *phba = vport->phba;
213 LPFC_MBOXQ_t *pmboxq; 257 LPFC_MBOXQ_t *pmboxq;
214 int mbxstatus = MBXERR_ERROR; 258 int mbxstatus = MBXERR_ERROR;
215 259
216 if ((phba->fc_flag & FC_OFFLINE_MODE) || 260 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
217 (phba->fc_flag & FC_BLOCK_MGMT_IO) || 261 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) ||
218 (phba->hba_state != LPFC_HBA_READY)) 262 (vport->port_state != LPFC_VPORT_READY))
219 return -EPERM; 263 return -EPERM;
220 264
221 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); 265 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
@@ -238,9 +282,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
238 } 282 }
239 283
240 lpfc_set_loopback_flag(phba); 284 lpfc_set_loopback_flag(phba);
241 if (mbxstatus == MBX_TIMEOUT) 285 if (mbxstatus != MBX_TIMEOUT)
242 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
243 else
244 mempool_free(pmboxq, phba->mbox_mem_pool); 286 mempool_free(pmboxq, phba->mbox_mem_pool);
245 287
246 if (mbxstatus == MBXERR_ERROR) 288 if (mbxstatus == MBXERR_ERROR)
@@ -320,8 +362,10 @@ lpfc_selective_reset(struct lpfc_hba *phba)
320static ssize_t 362static ssize_t
321lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count) 363lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
322{ 364{
323 struct Scsi_Host *host = class_to_shost(cdev); 365 struct Scsi_Host *shost = class_to_shost(cdev);
324 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 366 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
367 struct lpfc_hba *phba = vport->phba;
368
325 int status = -EINVAL; 369 int status = -EINVAL;
326 370
327 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) 371 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
@@ -336,23 +380,26 @@ lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
336static ssize_t 380static ssize_t
337lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) 381lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
338{ 382{
339 struct Scsi_Host *host = class_to_shost(cdev); 383 struct Scsi_Host *shost = class_to_shost(cdev);
340 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 384 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
385 struct lpfc_hba *phba = vport->phba;
386
341 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 387 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
342} 388}
343 389
344static ssize_t 390static ssize_t
345lpfc_board_mode_show(struct class_device *cdev, char *buf) 391lpfc_board_mode_show(struct class_device *cdev, char *buf)
346{ 392{
347 struct Scsi_Host *host = class_to_shost(cdev); 393 struct Scsi_Host *shost = class_to_shost(cdev);
348 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 394 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
395 struct lpfc_hba *phba = vport->phba;
349 char * state; 396 char * state;
350 397
351 if (phba->hba_state == LPFC_HBA_ERROR) 398 if (phba->link_state == LPFC_HBA_ERROR)
352 state = "error"; 399 state = "error";
353 else if (phba->hba_state == LPFC_WARM_START) 400 else if (phba->link_state == LPFC_WARM_START)
354 state = "warm start"; 401 state = "warm start";
355 else if (phba->hba_state == LPFC_INIT_START) 402 else if (phba->link_state == LPFC_INIT_START)
356 state = "offline"; 403 state = "offline";
357 else 404 else
358 state = "online"; 405 state = "online";
@@ -363,8 +410,9 @@ lpfc_board_mode_show(struct class_device *cdev, char *buf)
363static ssize_t 410static ssize_t
364lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count) 411lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
365{ 412{
366 struct Scsi_Host *host = class_to_shost(cdev); 413 struct Scsi_Host *shost = class_to_shost(cdev);
367 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 414 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
415 struct lpfc_hba *phba = vport->phba;
368 struct completion online_compl; 416 struct completion online_compl;
369 int status=0; 417 int status=0;
370 418
@@ -389,11 +437,166 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
389 return -EIO; 437 return -EIO;
390} 438}
391 439
440int
441lpfc_get_hba_info(struct lpfc_hba *phba,
442 uint32_t *mxri, uint32_t *axri,
443 uint32_t *mrpi, uint32_t *arpi,
444 uint32_t *mvpi, uint32_t *avpi)
445{
446 struct lpfc_sli *psli = &phba->sli;
447 LPFC_MBOXQ_t *pmboxq;
448 MAILBOX_t *pmb;
449 int rc = 0;
450
451 /*
452 * prevent udev from issuing mailbox commands until the port is
453 * configured.
454 */
455 if (phba->link_state < LPFC_LINK_DOWN ||
456 !phba->mbox_mem_pool ||
457 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
458 return 0;
459
460 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
461 return 0;
462
463 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
464 if (!pmboxq)
465 return 0;
466 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
467
468 pmb = &pmboxq->mb;
469 pmb->mbxCommand = MBX_READ_CONFIG;
470 pmb->mbxOwner = OWN_HOST;
471 pmboxq->context1 = NULL;
472
473 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
474 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
475 rc = MBX_NOT_FINISHED;
476 else
477 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
478
479 if (rc != MBX_SUCCESS) {
480 if (rc != MBX_TIMEOUT)
481 mempool_free(pmboxq, phba->mbox_mem_pool);
482 return 0;
483 }
484
485 if (mrpi)
486 *mrpi = pmb->un.varRdConfig.max_rpi;
487 if (arpi)
488 *arpi = pmb->un.varRdConfig.avail_rpi;
489 if (mxri)
490 *mxri = pmb->un.varRdConfig.max_xri;
491 if (axri)
492 *axri = pmb->un.varRdConfig.avail_xri;
493 if (mvpi)
494 *mvpi = pmb->un.varRdConfig.max_vpi;
495 if (avpi)
496 *avpi = pmb->un.varRdConfig.avail_vpi;
497
498 mempool_free(pmboxq, phba->mbox_mem_pool);
499 return 1;
500}
501
502static ssize_t
503lpfc_max_rpi_show(struct class_device *cdev, char *buf)
504{
505 struct Scsi_Host *shost = class_to_shost(cdev);
506 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
507 struct lpfc_hba *phba = vport->phba;
508 uint32_t cnt;
509
510 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
511 return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
512 return snprintf(buf, PAGE_SIZE, "Unknown\n");
513}
514
515static ssize_t
516lpfc_used_rpi_show(struct class_device *cdev, char *buf)
517{
518 struct Scsi_Host *shost = class_to_shost(cdev);
519 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
520 struct lpfc_hba *phba = vport->phba;
521 uint32_t cnt, acnt;
522
523 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
524 return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
525 return snprintf(buf, PAGE_SIZE, "Unknown\n");
526}
527
528static ssize_t
529lpfc_max_xri_show(struct class_device *cdev, char *buf)
530{
531 struct Scsi_Host *shost = class_to_shost(cdev);
532 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
533 struct lpfc_hba *phba = vport->phba;
534 uint32_t cnt;
535
536 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
537 return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
538 return snprintf(buf, PAGE_SIZE, "Unknown\n");
539}
540
541static ssize_t
542lpfc_used_xri_show(struct class_device *cdev, char *buf)
543{
544 struct Scsi_Host *shost = class_to_shost(cdev);
545 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
546 struct lpfc_hba *phba = vport->phba;
547 uint32_t cnt, acnt;
548
549 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
550 return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
551 return snprintf(buf, PAGE_SIZE, "Unknown\n");
552}
553
554static ssize_t
555lpfc_max_vpi_show(struct class_device *cdev, char *buf)
556{
557 struct Scsi_Host *shost = class_to_shost(cdev);
558 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
559 struct lpfc_hba *phba = vport->phba;
560 uint32_t cnt;
561
562 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
563 return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
564 return snprintf(buf, PAGE_SIZE, "Unknown\n");
565}
566
567static ssize_t
568lpfc_used_vpi_show(struct class_device *cdev, char *buf)
569{
570 struct Scsi_Host *shost = class_to_shost(cdev);
571 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
572 struct lpfc_hba *phba = vport->phba;
573 uint32_t cnt, acnt;
574
575 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
576 return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
577 return snprintf(buf, PAGE_SIZE, "Unknown\n");
578}
579
580static ssize_t
581lpfc_npiv_info_show(struct class_device *cdev, char *buf)
582{
583 struct Scsi_Host *shost = class_to_shost(cdev);
584 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
585 struct lpfc_hba *phba = vport->phba;
586
587 if (!(phba->max_vpi))
588 return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
589 if (vport->port_type == LPFC_PHYSICAL_PORT)
590 return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
591 return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
592}
593
392static ssize_t 594static ssize_t
393lpfc_poll_show(struct class_device *cdev, char *buf) 595lpfc_poll_show(struct class_device *cdev, char *buf)
394{ 596{
395 struct Scsi_Host *host = class_to_shost(cdev); 597 struct Scsi_Host *shost = class_to_shost(cdev);
396 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 598 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
599 struct lpfc_hba *phba = vport->phba;
397 600
398 return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); 601 return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
399} 602}
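The hunk above adds lpfc_get_hba_info(), which pulls the maximum and available RPI/XRI/VPI counts out of a single READ_CONFIG mailbox reply, and a family of max_*/used_* show functions that report either a count or "Unknown" when the mailbox cannot be issued; each used_* value is simply max minus available. A standalone sketch of that derivation follows; the structure and the numbers are made-up examples, not values from a real adapter.

/*
 * Standalone sketch of the used_rpi/used_xri/used_vpi derivation: the values
 * come from the READ_CONFIG reply (max and available counts), and "used" is
 * max - available.  The struct and the numbers below are illustrative only.
 */
#include <stdio.h>

struct read_config_mock {		/* stand-in for pmb->un.varRdConfig */
	unsigned int max_rpi;
	unsigned int avail_rpi;
};

/* mirrors lpfc_used_rpi_show(): print the in-use count, or "Unknown" when
 * the adapter could not be queried (lpfc_get_hba_info() returned 0). */
static int show_used_rpi(const struct read_config_mock *cfg, char *buf, size_t len)
{
	if (!cfg)
		return snprintf(buf, len, "Unknown\n");
	return snprintf(buf, len, "%u\n", cfg->max_rpi - cfg->avail_rpi);
}

int main(void)
{
	struct read_config_mock cfg = { .max_rpi = 512, .avail_rpi = 488 };
	char buf[32];

	show_used_rpi(&cfg, buf, sizeof(buf));
	fputs(buf, stdout);		/* prints 24: RPIs currently in use */
	show_used_rpi(NULL, buf, sizeof(buf));
	fputs(buf, stdout);		/* prints "Unknown" */
	return 0;
}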
@@ -402,8 +605,9 @@ static ssize_t
402lpfc_poll_store(struct class_device *cdev, const char *buf, 605lpfc_poll_store(struct class_device *cdev, const char *buf,
403 size_t count) 606 size_t count)
404{ 607{
405 struct Scsi_Host *host = class_to_shost(cdev); 608 struct Scsi_Host *shost = class_to_shost(cdev);
406 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 609 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
610 struct lpfc_hba *phba = vport->phba;
407 uint32_t creg_val; 611 uint32_t creg_val;
408 uint32_t old_val; 612 uint32_t old_val;
409 int val=0; 613 int val=0;
@@ -417,7 +621,7 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
417 if ((val & 0x3) != val) 621 if ((val & 0x3) != val)
418 return -EINVAL; 622 return -EINVAL;
419 623
420 spin_lock_irq(phba->host->host_lock); 624 spin_lock_irq(&phba->hbalock);
421 625
422 old_val = phba->cfg_poll; 626 old_val = phba->cfg_poll;
423 627
@@ -432,16 +636,16 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
432 lpfc_poll_start_timer(phba); 636 lpfc_poll_start_timer(phba);
433 } 637 }
434 } else if (val != 0x0) { 638 } else if (val != 0x0) {
435 spin_unlock_irq(phba->host->host_lock); 639 spin_unlock_irq(&phba->hbalock);
436 return -EINVAL; 640 return -EINVAL;
437 } 641 }
438 642
439 if (!(val & DISABLE_FCP_RING_INT) && 643 if (!(val & DISABLE_FCP_RING_INT) &&
440 (old_val & DISABLE_FCP_RING_INT)) 644 (old_val & DISABLE_FCP_RING_INT))
441 { 645 {
442 spin_unlock_irq(phba->host->host_lock); 646 spin_unlock_irq(&phba->hbalock);
443 del_timer(&phba->fcp_poll_timer); 647 del_timer(&phba->fcp_poll_timer);
444 spin_lock_irq(phba->host->host_lock); 648 spin_lock_irq(&phba->hbalock);
445 creg_val = readl(phba->HCregaddr); 649 creg_val = readl(phba->HCregaddr);
446 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 650 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
447 writel(creg_val, phba->HCregaddr); 651 writel(creg_val, phba->HCregaddr);
@@ -450,7 +654,7 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
450 654
451 phba->cfg_poll = val; 655 phba->cfg_poll = val;
452 656
453 spin_unlock_irq(phba->host->host_lock); 657 spin_unlock_irq(&phba->hbalock);
454 658
455 return strlen(buf); 659 return strlen(buf);
456} 660}
@@ -459,8 +663,9 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
459static ssize_t \ 663static ssize_t \
460lpfc_##attr##_show(struct class_device *cdev, char *buf) \ 664lpfc_##attr##_show(struct class_device *cdev, char *buf) \
461{ \ 665{ \
462 struct Scsi_Host *host = class_to_shost(cdev);\ 666 struct Scsi_Host *shost = class_to_shost(cdev);\
463 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\ 667 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
668 struct lpfc_hba *phba = vport->phba;\
464 int val = 0;\ 669 int val = 0;\
465 val = phba->cfg_##attr;\ 670 val = phba->cfg_##attr;\
466 return snprintf(buf, PAGE_SIZE, "%d\n",\ 671 return snprintf(buf, PAGE_SIZE, "%d\n",\
@@ -471,8 +676,9 @@ lpfc_##attr##_show(struct class_device *cdev, char *buf) \
471static ssize_t \ 676static ssize_t \
472lpfc_##attr##_show(struct class_device *cdev, char *buf) \ 677lpfc_##attr##_show(struct class_device *cdev, char *buf) \
473{ \ 678{ \
474 struct Scsi_Host *host = class_to_shost(cdev);\ 679 struct Scsi_Host *shost = class_to_shost(cdev);\
475 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\ 680 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
681 struct lpfc_hba *phba = vport->phba;\
476 int val = 0;\ 682 int val = 0;\
477 val = phba->cfg_##attr;\ 683 val = phba->cfg_##attr;\
478 return snprintf(buf, PAGE_SIZE, "%#x\n",\ 684 return snprintf(buf, PAGE_SIZE, "%#x\n",\
@@ -514,8 +720,9 @@ lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
514static ssize_t \ 720static ssize_t \
515lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \ 721lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
516{ \ 722{ \
517 struct Scsi_Host *host = class_to_shost(cdev);\ 723 struct Scsi_Host *shost = class_to_shost(cdev);\
518 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\ 724 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
725 struct lpfc_hba *phba = vport->phba;\
519 int val=0;\ 726 int val=0;\
520 if (!isdigit(buf[0]))\ 727 if (!isdigit(buf[0]))\
521 return -EINVAL;\ 728 return -EINVAL;\
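The two hunks above rework the bodies of the attribute show/store helper macros for the vport model; parameters such as lpfc_npiv_enable and lpfc_vport_restrict_login later in the file are declared with LPFC_ATTR_R()/LPFC_ATTR_RW(), which paste the attribute name into a cfg_ field plus init/set/show/store helpers (the real macros also register the module parameter and sysfs attribute). The standalone sketch below imitates only the token-pasting pattern with simplified stand-ins; it is not the driver's actual macro.

/*
 * Standalone imitation of the LPFC_ATTR-style token pasting: one macro
 * invocation generates the backing cfg_ variable plus init and show helpers
 * for an attribute.  Everything here is a simplified stand-in, not the
 * driver's real LPFC_ATTR_R()/LPFC_ATTR_RW() definition.
 */
#include <stdio.h>

#define LPFC_ATTR_DEMO(attr, defval, minval, maxval)			\
static int cfg_##attr = defval;						\
static int lpfc_##attr##_init(int val)					\
{									\
	if (val >= minval && val <= maxval) {				\
		cfg_##attr = val;					\
		return 0;						\
	}								\
	cfg_##attr = defval;	/* out of range: keep the default */	\
	return -1;							\
}									\
static int lpfc_##attr##_show(char *buf, size_t len)			\
{									\
	return snprintf(buf, len, "%d\n", cfg_##attr);			\
}

/* loosely mirrors LPFC_ATTR_R(npiv_enable, 0, 0, 1, "Enable NPIV ...") */
LPFC_ATTR_DEMO(npiv_enable, 0, 0, 1)

int main(void)
{
	char buf[16];

	lpfc_npiv_enable_init(1);		/* in-range value is accepted */
	lpfc_npiv_enable_show(buf, sizeof(buf));
	fputs(buf, stdout);			/* prints 1 */
	return 0;
}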
@@ -576,7 +783,7 @@ static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
576static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); 783static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
577static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL); 784static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
578static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL); 785static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
579static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_portnum_show, NULL); 786static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
580static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); 787static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
581static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); 788static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
582static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL); 789static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
@@ -592,6 +799,13 @@ static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
592static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 799static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
593 lpfc_board_mode_show, lpfc_board_mode_store); 800 lpfc_board_mode_show, lpfc_board_mode_store);
594static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 801static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
802static CLASS_DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
803static CLASS_DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
804static CLASS_DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
805static CLASS_DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
806static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
807static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
808static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
595 809
596 810
597static char *lpfc_soft_wwn_key = "C99G71SL8032A"; 811static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -600,8 +814,9 @@ static ssize_t
600lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf, 814lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf,
601 size_t count) 815 size_t count)
602{ 816{
603 struct Scsi_Host *host = class_to_shost(cdev); 817 struct Scsi_Host *shost = class_to_shost(cdev);
604 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 818 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
819 struct lpfc_hba *phba = vport->phba;
605 unsigned int cnt = count; 820 unsigned int cnt = count;
606 821
607 /* 822 /*
@@ -634,8 +849,10 @@ static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
634static ssize_t 849static ssize_t
635lpfc_soft_wwpn_show(struct class_device *cdev, char *buf) 850lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
636{ 851{
637 struct Scsi_Host *host = class_to_shost(cdev); 852 struct Scsi_Host *shost = class_to_shost(cdev);
638 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 853 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
854 struct lpfc_hba *phba = vport->phba;
855
639 return snprintf(buf, PAGE_SIZE, "0x%llx\n", 856 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
640 (unsigned long long)phba->cfg_soft_wwpn); 857 (unsigned long long)phba->cfg_soft_wwpn);
641} 858}
@@ -644,8 +861,9 @@ lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
644static ssize_t 861static ssize_t
645lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count) 862lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
646{ 863{
647 struct Scsi_Host *host = class_to_shost(cdev); 864 struct Scsi_Host *shost = class_to_shost(cdev);
648 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 865 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
866 struct lpfc_hba *phba = vport->phba;
649 struct completion online_compl; 867 struct completion online_compl;
650 int stat1=0, stat2=0; 868 int stat1=0, stat2=0;
651 unsigned int i, j, cnt=count; 869 unsigned int i, j, cnt=count;
@@ -680,9 +898,9 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
680 } 898 }
681 } 899 }
682 phba->cfg_soft_wwpn = wwn_to_u64(wwpn); 900 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
683 fc_host_port_name(host) = phba->cfg_soft_wwpn; 901 fc_host_port_name(shost) = phba->cfg_soft_wwpn;
684 if (phba->cfg_soft_wwnn) 902 if (phba->cfg_soft_wwnn)
685 fc_host_node_name(host) = phba->cfg_soft_wwnn; 903 fc_host_node_name(shost) = phba->cfg_soft_wwnn;
686 904
687 dev_printk(KERN_NOTICE, &phba->pcidev->dev, 905 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
688 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); 906 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
@@ -777,6 +995,15 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
777static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, 995static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
778 lpfc_poll_show, lpfc_poll_store); 996 lpfc_poll_show, lpfc_poll_store);
779 997
998int lpfc_sli_mode = 0;
999module_param(lpfc_sli_mode, int, 0);
1000MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
1001 " 0 - auto (SLI-3 if supported),"
1002 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
1003 " 3 - select SLI-3");
1004
1005LPFC_ATTR_R(npiv_enable, 0, 0, 1, "Enable NPIV functionality");
1006
780/* 1007/*
781# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 1008# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
782# until the timer expires. Value range is [0,255]. Default value is 30. 1009# until the timer expires. Value range is [0,255]. Default value is 30.
@@ -790,8 +1017,9 @@ MODULE_PARM_DESC(lpfc_nodev_tmo,
790static ssize_t 1017static ssize_t
791lpfc_nodev_tmo_show(struct class_device *cdev, char *buf) 1018lpfc_nodev_tmo_show(struct class_device *cdev, char *buf)
792{ 1019{
793 struct Scsi_Host *host = class_to_shost(cdev); 1020 struct Scsi_Host *shost = class_to_shost(cdev);
794 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1021 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1022 struct lpfc_hba *phba = vport->phba;
795 int val = 0; 1023 int val = 0;
796 val = phba->cfg_devloss_tmo; 1024 val = phba->cfg_devloss_tmo;
797 return snprintf(buf, PAGE_SIZE, "%d\n", 1025 return snprintf(buf, PAGE_SIZE, "%d\n",
@@ -832,13 +1060,19 @@ lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
832static void 1060static void
833lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba) 1061lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba)
834{ 1062{
1063 struct lpfc_vport *vport;
1064 struct Scsi_Host *shost;
835 struct lpfc_nodelist *ndlp; 1065 struct lpfc_nodelist *ndlp;
836 1066
837 spin_lock_irq(phba->host->host_lock); 1067 list_for_each_entry(vport, &phba->port_list, listentry) {
838 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) 1068 shost = lpfc_shost_from_vport(vport);
839 if (ndlp->rport) 1069 spin_lock_irq(shost->host_lock);
840 ndlp->rport->dev_loss_tmo = phba->cfg_devloss_tmo; 1070 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
841 spin_unlock_irq(phba->host->host_lock); 1071 if (ndlp->rport)
1072 ndlp->rport->dev_loss_tmo =
1073 phba->cfg_devloss_tmo;
1074 spin_unlock_irq(shost->host_lock);
1075 }
842} 1076}
843 1077
844static int 1078static int
@@ -946,6 +1180,33 @@ LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
946 "Max number of FCP commands we can queue to a lpfc HBA"); 1180 "Max number of FCP commands we can queue to a lpfc HBA");
947 1181
948/* 1182/*
1183# peer_port_login: This parameter allows/prevents logins
1184# between peer ports hosted on the same physical port.
1185# When this parameter is set to 0, peer ports of the same physical port
1186# are not allowed to login to each other.
1187# When this parameter is set to 1, peer ports of the same physical port
1188# are allowed to login to each other.
1189# Default value of this parameter is 0.
1190*/
1191LPFC_ATTR_R(peer_port_login, 0, 0, 1,
1192 "Allow peer ports on the same physical port to login to each "
1193 "other.");
1194
1195/*
1196# vport_restrict_login: This parameter allows/prevents logins
1197# between Virtual Ports and remote initiators.
1198# When this parameter is not set (0), Virtual Ports will accept PLOGIs from
1199# other initiators and will attempt to PLOGI all remote ports.
1200# When this parameter is set (1), Virtual Ports will reject PLOGIs from
1201# remote ports and will not attempt to PLOGI to other initiators.
1202# This parameter does not apply to the physical port.
1203# This parameter does not restrict logins to Fabric resident remote ports.
1204# Default value of this parameter is 1.
1205*/
1206LPFC_ATTR_RW(vport_restrict_login, 1, 0, 1,
1207 "Restrict virtual ports login to remote initiators.");
1208
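
Since vport_restrict_login is declared with LPFC_ATTR_RW, it is writable at run time through the host's sysfs attribute as well as settable at load time. The user-space sketch below shows one way a management tool might read and then clear it; the /sys path and host number are assumptions for the example, not taken from this patch.

/* Hypothetical user-space helper; adjust host0 to the real SCSI host. */
#include <stdio.h>

int main(void)
{
	const char *attr =
		"/sys/class/scsi_host/host0/lpfc_vport_restrict_login";
	char val[8] = "";
	FILE *f = fopen(attr, "r");

	if (f) {
		if (fgets(val, sizeof(val), f))
			printf("vport_restrict_login is currently %s", val);
		fclose(f);
	}

	f = fopen(attr, "w");
	if (!f) {
		perror(attr);
		return 1;
	}
	fputs("0\n", f);	/* 0: vports accept and attempt PLOGIs */
	fclose(f);
	return 0;
}
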
1209/*
949# Some disk devices have a "select ID" or "select Target" capability. 1210# Some disk devices have a "select ID" or "select Target" capability.
950# From a protocol standpoint "select ID" usually means select the 1211# From a protocol standpoint "select ID" usually means select the
951# Fibre channel "ALPA". In the FC-AL Profile there is an "informative 1212# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
@@ -1088,7 +1349,8 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
1088LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); 1349LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
1089 1350
1090 1351
1091struct class_device_attribute *lpfc_host_attrs[] = { 1352
1353struct class_device_attribute *lpfc_hba_attrs[] = {
1092 &class_device_attr_info, 1354 &class_device_attr_info,
1093 &class_device_attr_serialnum, 1355 &class_device_attr_serialnum,
1094 &class_device_attr_modeldesc, 1356 &class_device_attr_modeldesc,
@@ -1104,6 +1366,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
1104 &class_device_attr_lpfc_log_verbose, 1366 &class_device_attr_lpfc_log_verbose,
1105 &class_device_attr_lpfc_lun_queue_depth, 1367 &class_device_attr_lpfc_lun_queue_depth,
1106 &class_device_attr_lpfc_hba_queue_depth, 1368 &class_device_attr_lpfc_hba_queue_depth,
1369 &class_device_attr_lpfc_peer_port_login,
1370 &class_device_attr_lpfc_vport_restrict_login,
1107 &class_device_attr_lpfc_nodev_tmo, 1371 &class_device_attr_lpfc_nodev_tmo,
1108 &class_device_attr_lpfc_devloss_tmo, 1372 &class_device_attr_lpfc_devloss_tmo,
1109 &class_device_attr_lpfc_fcp_class, 1373 &class_device_attr_lpfc_fcp_class,
@@ -1119,9 +1383,17 @@ struct class_device_attribute *lpfc_host_attrs[] = {
1119 &class_device_attr_lpfc_multi_ring_type, 1383 &class_device_attr_lpfc_multi_ring_type,
1120 &class_device_attr_lpfc_fdmi_on, 1384 &class_device_attr_lpfc_fdmi_on,
1121 &class_device_attr_lpfc_max_luns, 1385 &class_device_attr_lpfc_max_luns,
1386 &class_device_attr_lpfc_npiv_enable,
1122 &class_device_attr_nport_evt_cnt, 1387 &class_device_attr_nport_evt_cnt,
1123 &class_device_attr_management_version, 1388 &class_device_attr_management_version,
1124 &class_device_attr_board_mode, 1389 &class_device_attr_board_mode,
1390 &class_device_attr_max_vpi,
1391 &class_device_attr_used_vpi,
1392 &class_device_attr_max_rpi,
1393 &class_device_attr_used_rpi,
1394 &class_device_attr_max_xri,
1395 &class_device_attr_used_xri,
1396 &class_device_attr_npiv_info,
1125 &class_device_attr_issue_reset, 1397 &class_device_attr_issue_reset,
1126 &class_device_attr_lpfc_poll, 1398 &class_device_attr_lpfc_poll,
1127 &class_device_attr_lpfc_poll_tmo, 1399 &class_device_attr_lpfc_poll_tmo,
@@ -1137,9 +1409,11 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1137 char *buf, loff_t off, size_t count) 1409 char *buf, loff_t off, size_t count)
1138{ 1410{
1139 size_t buf_off; 1411 size_t buf_off;
1140 struct Scsi_Host *host = class_to_shost(container_of(kobj, 1412 struct class_device *cdev = container_of(kobj, struct class_device,
1141 struct class_device, kobj)); 1413 kobj);
1142 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1414 struct Scsi_Host *shost = class_to_shost(cdev);
1415 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1416 struct lpfc_hba *phba = vport->phba;
1143 1417
1144 if ((off + count) > FF_REG_AREA_SIZE) 1418 if ((off + count) > FF_REG_AREA_SIZE)
1145 return -ERANGE; 1419 return -ERANGE;
@@ -1149,18 +1423,16 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1149 if (off % 4 || count % 4 || (unsigned long)buf % 4) 1423 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1150 return -EINVAL; 1424 return -EINVAL;
1151 1425
1152 spin_lock_irq(phba->host->host_lock); 1426 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
1153
1154 if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
1155 spin_unlock_irq(phba->host->host_lock);
1156 return -EPERM; 1427 return -EPERM;
1157 } 1428 }
1158 1429
1430 spin_lock_irq(&phba->hbalock);
1159 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) 1431 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
1160 writel(*((uint32_t *)(buf + buf_off)), 1432 writel(*((uint32_t *)(buf + buf_off)),
1161 phba->ctrl_regs_memmap_p + off + buf_off); 1433 phba->ctrl_regs_memmap_p + off + buf_off);
1162 1434
1163 spin_unlock_irq(phba->host->host_lock); 1435 spin_unlock_irq(&phba->hbalock);
1164 1436
1165 return count; 1437 return count;
1166} 1438}
@@ -1171,9 +1443,11 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1171{ 1443{
1172 size_t buf_off; 1444 size_t buf_off;
1173 uint32_t * tmp_ptr; 1445 uint32_t * tmp_ptr;
1174 struct Scsi_Host *host = class_to_shost(container_of(kobj, 1446 struct class_device *cdev = container_of(kobj, struct class_device,
1175 struct class_device, kobj)); 1447 kobj);
1176 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1448 struct Scsi_Host *shost = class_to_shost(cdev);
1449 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1450 struct lpfc_hba *phba = vport->phba;
1177 1451
1178 if (off > FF_REG_AREA_SIZE) 1452 if (off > FF_REG_AREA_SIZE)
1179 return -ERANGE; 1453 return -ERANGE;
@@ -1186,14 +1460,14 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1186 if (off % 4 || count % 4 || (unsigned long)buf % 4) 1460 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1187 return -EINVAL; 1461 return -EINVAL;
1188 1462
1189 spin_lock_irq(phba->host->host_lock); 1463 spin_lock_irq(&phba->hbalock);
1190 1464
1191 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) { 1465 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
1192 tmp_ptr = (uint32_t *)(buf + buf_off); 1466 tmp_ptr = (uint32_t *)(buf + buf_off);
1193 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off); 1467 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
1194 } 1468 }
1195 1469
1196 spin_unlock_irq(phba->host->host_lock); 1470 spin_unlock_irq(&phba->hbalock);
1197 1471
1198 return count; 1472 return count;
1199} 1473}
@@ -1210,7 +1484,7 @@ static struct bin_attribute sysfs_ctlreg_attr = {
1210 1484
1211 1485
1212static void 1486static void
1213sysfs_mbox_idle (struct lpfc_hba * phba) 1487sysfs_mbox_idle(struct lpfc_hba *phba)
1214{ 1488{
1215 phba->sysfs_mbox.state = SMBOX_IDLE; 1489 phba->sysfs_mbox.state = SMBOX_IDLE;
1216 phba->sysfs_mbox.offset = 0; 1490 phba->sysfs_mbox.offset = 0;
@@ -1226,10 +1500,12 @@ static ssize_t
1226sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, 1500sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1227 char *buf, loff_t off, size_t count) 1501 char *buf, loff_t off, size_t count)
1228{ 1502{
1229 struct Scsi_Host * host = 1503 struct class_device *cdev = container_of(kobj, struct class_device,
1230 class_to_shost(container_of(kobj, struct class_device, kobj)); 1504 kobj);
1231 struct lpfc_hba * phba = (struct lpfc_hba*)host->hostdata; 1505 struct Scsi_Host *shost = class_to_shost(cdev);
1232 struct lpfcMboxq * mbox = NULL; 1506 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1507 struct lpfc_hba *phba = vport->phba;
1508 struct lpfcMboxq *mbox = NULL;
1233 1509
1234 if ((count + off) > MAILBOX_CMD_SIZE) 1510 if ((count + off) > MAILBOX_CMD_SIZE)
1235 return -ERANGE; 1511 return -ERANGE;
@@ -1247,7 +1523,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1247 memset(mbox, 0, sizeof (LPFC_MBOXQ_t)); 1523 memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
1248 } 1524 }
1249 1525
1250 spin_lock_irq(host->host_lock); 1526 spin_lock_irq(&phba->hbalock);
1251 1527
1252 if (off == 0) { 1528 if (off == 0) {
1253 if (phba->sysfs_mbox.mbox) 1529 if (phba->sysfs_mbox.mbox)
@@ -1258,9 +1534,9 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1258 } else { 1534 } else {
1259 if (phba->sysfs_mbox.state != SMBOX_WRITING || 1535 if (phba->sysfs_mbox.state != SMBOX_WRITING ||
1260 phba->sysfs_mbox.offset != off || 1536 phba->sysfs_mbox.offset != off ||
1261 phba->sysfs_mbox.mbox == NULL ) { 1537 phba->sysfs_mbox.mbox == NULL) {
1262 sysfs_mbox_idle(phba); 1538 sysfs_mbox_idle(phba);
1263 spin_unlock_irq(host->host_lock); 1539 spin_unlock_irq(&phba->hbalock);
1264 return -EAGAIN; 1540 return -EAGAIN;
1265 } 1541 }
1266 } 1542 }
@@ -1270,7 +1546,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1270 1546
1271 phba->sysfs_mbox.offset = off + count; 1547 phba->sysfs_mbox.offset = off + count;
1272 1548
1273 spin_unlock_irq(host->host_lock); 1549 spin_unlock_irq(&phba->hbalock);
1274 1550
1275 return count; 1551 return count;
1276} 1552}
@@ -1279,10 +1555,11 @@ static ssize_t
1279sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, 1555sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1280 char *buf, loff_t off, size_t count) 1556 char *buf, loff_t off, size_t count)
1281{ 1557{
1282 struct Scsi_Host *host = 1558 struct class_device *cdev = container_of(kobj, struct class_device,
1283 class_to_shost(container_of(kobj, struct class_device, 1559 kobj);
1284 kobj)); 1560 struct Scsi_Host *shost = class_to_shost(cdev);
1285 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1561 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1562 struct lpfc_hba *phba = vport->phba;
1286 int rc; 1563 int rc;
1287 1564
1288 if (off > MAILBOX_CMD_SIZE) 1565 if (off > MAILBOX_CMD_SIZE)
@@ -1297,7 +1574,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1297 if (off && count == 0) 1574 if (off && count == 0)
1298 return 0; 1575 return 0;
1299 1576
1300 spin_lock_irq(phba->host->host_lock); 1577 spin_lock_irq(&phba->hbalock);
1301 1578
1302 if (off == 0 && 1579 if (off == 0 &&
1303 phba->sysfs_mbox.state == SMBOX_WRITING && 1580 phba->sysfs_mbox.state == SMBOX_WRITING &&
@@ -1320,12 +1597,12 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1320 case MBX_SET_MASK: 1597 case MBX_SET_MASK:
1321 case MBX_SET_SLIM: 1598 case MBX_SET_SLIM:
1322 case MBX_SET_DEBUG: 1599 case MBX_SET_DEBUG:
1323 if (!(phba->fc_flag & FC_OFFLINE_MODE)) { 1600 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
1324 printk(KERN_WARNING "mbox_read:Command 0x%x " 1601 printk(KERN_WARNING "mbox_read:Command 0x%x "
1325 "is illegal in on-line state\n", 1602 "is illegal in on-line state\n",
1326 phba->sysfs_mbox.mbox->mb.mbxCommand); 1603 phba->sysfs_mbox.mbox->mb.mbxCommand);
1327 sysfs_mbox_idle(phba); 1604 sysfs_mbox_idle(phba);
1328 spin_unlock_irq(phba->host->host_lock); 1605 spin_unlock_irq(&phba->hbalock);
1329 return -EPERM; 1606 return -EPERM;
1330 } 1607 }
1331 case MBX_LOAD_SM: 1608 case MBX_LOAD_SM:
@@ -1355,48 +1632,48 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1355 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", 1632 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
1356 phba->sysfs_mbox.mbox->mb.mbxCommand); 1633 phba->sysfs_mbox.mbox->mb.mbxCommand);
1357 sysfs_mbox_idle(phba); 1634 sysfs_mbox_idle(phba);
1358 spin_unlock_irq(phba->host->host_lock); 1635 spin_unlock_irq(&phba->hbalock);
1359 return -EPERM; 1636 return -EPERM;
1360 default: 1637 default:
1361 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", 1638 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
1362 phba->sysfs_mbox.mbox->mb.mbxCommand); 1639 phba->sysfs_mbox.mbox->mb.mbxCommand);
1363 sysfs_mbox_idle(phba); 1640 sysfs_mbox_idle(phba);
1364 spin_unlock_irq(phba->host->host_lock); 1641 spin_unlock_irq(&phba->hbalock);
1365 return -EPERM; 1642 return -EPERM;
1366 } 1643 }
1367 1644
1368 if (phba->fc_flag & FC_BLOCK_MGMT_IO) { 1645 phba->sysfs_mbox.mbox->vport = vport;
1646
1647 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
1369 sysfs_mbox_idle(phba); 1648 sysfs_mbox_idle(phba);
1370 spin_unlock_irq(host->host_lock); 1649 spin_unlock_irq(&phba->hbalock);
1371 return -EAGAIN; 1650 return -EAGAIN;
1372 } 1651 }
1373 1652
1374 if ((phba->fc_flag & FC_OFFLINE_MODE) || 1653 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1375 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ 1654 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
1376 1655
1377 spin_unlock_irq(phba->host->host_lock); 1656 spin_unlock_irq(&phba->hbalock);
1378 rc = lpfc_sli_issue_mbox (phba, 1657 rc = lpfc_sli_issue_mbox (phba,
1379 phba->sysfs_mbox.mbox, 1658 phba->sysfs_mbox.mbox,
1380 MBX_POLL); 1659 MBX_POLL);
1381 spin_lock_irq(phba->host->host_lock); 1660 spin_lock_irq(&phba->hbalock);
1382 1661
1383 } else { 1662 } else {
1384 spin_unlock_irq(phba->host->host_lock); 1663 spin_unlock_irq(&phba->hbalock);
1385 rc = lpfc_sli_issue_mbox_wait (phba, 1664 rc = lpfc_sli_issue_mbox_wait (phba,
1386 phba->sysfs_mbox.mbox, 1665 phba->sysfs_mbox.mbox,
1387 lpfc_mbox_tmo_val(phba, 1666 lpfc_mbox_tmo_val(phba,
1388 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); 1667 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
1389 spin_lock_irq(phba->host->host_lock); 1668 spin_lock_irq(&phba->hbalock);
1390 } 1669 }
1391 1670
1392 if (rc != MBX_SUCCESS) { 1671 if (rc != MBX_SUCCESS) {
1393 if (rc == MBX_TIMEOUT) { 1672 if (rc == MBX_TIMEOUT) {
1394 phba->sysfs_mbox.mbox->mbox_cmpl =
1395 lpfc_sli_def_mbox_cmpl;
1396 phba->sysfs_mbox.mbox = NULL; 1673 phba->sysfs_mbox.mbox = NULL;
1397 } 1674 }
1398 sysfs_mbox_idle(phba); 1675 sysfs_mbox_idle(phba);
1399 spin_unlock_irq(host->host_lock); 1676 spin_unlock_irq(&phba->hbalock);
1400 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; 1677 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
1401 } 1678 }
1402 phba->sysfs_mbox.state = SMBOX_READING; 1679 phba->sysfs_mbox.state = SMBOX_READING;
@@ -1405,7 +1682,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1405 phba->sysfs_mbox.state != SMBOX_READING) { 1682 phba->sysfs_mbox.state != SMBOX_READING) {
1406 printk(KERN_WARNING "mbox_read: Bad State\n"); 1683 printk(KERN_WARNING "mbox_read: Bad State\n");
1407 sysfs_mbox_idle(phba); 1684 sysfs_mbox_idle(phba);
1408 spin_unlock_irq(host->host_lock); 1685 spin_unlock_irq(&phba->hbalock);
1409 return -EAGAIN; 1686 return -EAGAIN;
1410 } 1687 }
1411 1688
@@ -1416,7 +1693,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1416 if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE) 1693 if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
1417 sysfs_mbox_idle(phba); 1694 sysfs_mbox_idle(phba);
1418 1695
1419 spin_unlock_irq(phba->host->host_lock); 1696 spin_unlock_irq(&phba->hbalock);
1420 1697
1421 return count; 1698 return count;
1422} 1699}
@@ -1432,35 +1709,35 @@ static struct bin_attribute sysfs_mbox_attr = {
1432}; 1709};
1433 1710
1434int 1711int
1435lpfc_alloc_sysfs_attr(struct lpfc_hba *phba) 1712lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
1436{ 1713{
1437 struct Scsi_Host *host = phba->host; 1714 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1438 int error; 1715 int error;
1439 1716
1440 error = sysfs_create_bin_file(&host->shost_classdev.kobj, 1717 error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
1441 &sysfs_ctlreg_attr); 1718 &sysfs_ctlreg_attr);
1442 if (error) 1719 if (error)
1443 goto out; 1720 goto out;
1444 1721
1445 error = sysfs_create_bin_file(&host->shost_classdev.kobj, 1722 error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
1446 &sysfs_mbox_attr); 1723 &sysfs_mbox_attr);
1447 if (error) 1724 if (error)
1448 goto out_remove_ctlreg_attr; 1725 goto out_remove_ctlreg_attr;
1449 1726
1450 return 0; 1727 return 0;
1451out_remove_ctlreg_attr: 1728out_remove_ctlreg_attr:
1452 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr); 1729 sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr);
1453out: 1730out:
1454 return error; 1731 return error;
1455} 1732}
1456 1733
1457void 1734void
1458lpfc_free_sysfs_attr(struct lpfc_hba *phba) 1735lpfc_free_sysfs_attr(struct lpfc_vport *vport)
1459{ 1736{
1460 struct Scsi_Host *host = phba->host; 1737 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1461 1738
1462 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr); 1739 sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_mbox_attr);
1463 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr); 1740 sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr);
1464} 1741}
1465 1742
1466 1743
@@ -1471,26 +1748,30 @@ lpfc_free_sysfs_attr(struct lpfc_hba *phba)
1471static void 1748static void
1472lpfc_get_host_port_id(struct Scsi_Host *shost) 1749lpfc_get_host_port_id(struct Scsi_Host *shost)
1473{ 1750{
1474 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; 1751 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1752
1475 /* note: fc_myDID already in cpu endianness */ 1753 /* note: fc_myDID already in cpu endianness */
1476 fc_host_port_id(shost) = phba->fc_myDID; 1754 fc_host_port_id(shost) = vport->fc_myDID;
1477} 1755}
1478 1756
1479static void 1757static void
1480lpfc_get_host_port_type(struct Scsi_Host *shost) 1758lpfc_get_host_port_type(struct Scsi_Host *shost)
1481{ 1759{
1482 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; 1760 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1761 struct lpfc_hba *phba = vport->phba;
1483 1762
1484 spin_lock_irq(shost->host_lock); 1763 spin_lock_irq(shost->host_lock);
1485 1764
1486 if (phba->hba_state == LPFC_HBA_READY) { 1765 if (vport->port_type == LPFC_NPIV_PORT) {
1766 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1767 } else if (lpfc_is_link_up(phba)) {
1487 if (phba->fc_topology == TOPOLOGY_LOOP) { 1768 if (phba->fc_topology == TOPOLOGY_LOOP) {
1488 if (phba->fc_flag & FC_PUBLIC_LOOP) 1769 if (vport->fc_flag & FC_PUBLIC_LOOP)
1489 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 1770 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
1490 else 1771 else
1491 fc_host_port_type(shost) = FC_PORTTYPE_LPORT; 1772 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
1492 } else { 1773 } else {
1493 if (phba->fc_flag & FC_FABRIC) 1774 if (vport->fc_flag & FC_FABRIC)
1494 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 1775 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
1495 else 1776 else
1496 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 1777 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
@@ -1504,29 +1785,20 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
1504static void 1785static void
1505lpfc_get_host_port_state(struct Scsi_Host *shost) 1786lpfc_get_host_port_state(struct Scsi_Host *shost)
1506{ 1787{
1507 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; 1788 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1789 struct lpfc_hba *phba = vport->phba;
1508 1790
1509 spin_lock_irq(shost->host_lock); 1791 spin_lock_irq(shost->host_lock);
1510 1792
1511 if (phba->fc_flag & FC_OFFLINE_MODE) 1793 if (vport->fc_flag & FC_OFFLINE_MODE)
1512 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 1794 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1513 else { 1795 else {
1514 switch (phba->hba_state) { 1796 switch (phba->link_state) {
1515 case LPFC_STATE_UNKNOWN: 1797 case LPFC_LINK_UNKNOWN:
1516 case LPFC_WARM_START:
1517 case LPFC_INIT_START:
1518 case LPFC_INIT_MBX_CMDS:
1519 case LPFC_LINK_DOWN: 1798 case LPFC_LINK_DOWN:
1520 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 1799 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1521 break; 1800 break;
1522 case LPFC_LINK_UP: 1801 case LPFC_LINK_UP:
1523 case LPFC_LOCAL_CFG_LINK:
1524 case LPFC_FLOGI:
1525 case LPFC_FABRIC_CFG_LINK:
1526 case LPFC_NS_REG:
1527 case LPFC_NS_QRY:
1528 case LPFC_BUILD_DISC_LIST:
1529 case LPFC_DISC_AUTH:
1530 case LPFC_CLEAR_LA: 1802 case LPFC_CLEAR_LA:
1531 case LPFC_HBA_READY: 1803 case LPFC_HBA_READY:
1532 /* Links up, beyond this port_type reports state */ 1804 /* Links up, beyond this port_type reports state */
@@ -1547,11 +1819,12 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
1547static void 1819static void
1548lpfc_get_host_speed(struct Scsi_Host *shost) 1820lpfc_get_host_speed(struct Scsi_Host *shost)
1549{ 1821{
1550 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; 1822 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1823 struct lpfc_hba *phba = vport->phba;
1551 1824
1552 spin_lock_irq(shost->host_lock); 1825 spin_lock_irq(shost->host_lock);
1553 1826
1554 if (phba->hba_state == LPFC_HBA_READY) { 1827 if (lpfc_is_link_up(phba)) {
1555 switch(phba->fc_linkspeed) { 1828 switch(phba->fc_linkspeed) {
1556 case LA_1GHZ_LINK: 1829 case LA_1GHZ_LINK:
1557 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; 1830 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
@@ -1577,39 +1850,31 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
1577static void 1850static void
1578lpfc_get_host_fabric_name (struct Scsi_Host *shost) 1851lpfc_get_host_fabric_name (struct Scsi_Host *shost)
1579{ 1852{
1580 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; 1853 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1854 struct lpfc_hba *phba = vport->phba;
1581 u64 node_name; 1855 u64 node_name;
1582 1856
1583 spin_lock_irq(shost->host_lock); 1857 spin_lock_irq(shost->host_lock);
1584 1858
1585 if ((phba->fc_flag & FC_FABRIC) || 1859 if ((vport->fc_flag & FC_FABRIC) ||
1586 ((phba->fc_topology == TOPOLOGY_LOOP) && 1860 ((phba->fc_topology == TOPOLOGY_LOOP) &&
1587 (phba->fc_flag & FC_PUBLIC_LOOP))) 1861 (vport->fc_flag & FC_PUBLIC_LOOP)))
1588 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); 1862 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
1589 else 1863 else
1590 /* fabric is local port if there is no F/FL_Port */ 1864 /* fabric is local port if there is no F/FL_Port */
1591 node_name = wwn_to_u64(phba->fc_nodename.u.wwn); 1865 node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1592 1866
1593 spin_unlock_irq(shost->host_lock); 1867 spin_unlock_irq(shost->host_lock);
1594 1868
1595 fc_host_fabric_name(shost) = node_name; 1869 fc_host_fabric_name(shost) = node_name;
1596} 1870}
1597 1871
1598static void
1599lpfc_get_host_symbolic_name (struct Scsi_Host *shost)
1600{
1601 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
1602
1603 spin_lock_irq(shost->host_lock);
1604 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
1605 spin_unlock_irq(shost->host_lock);
1606}
1607
1608static struct fc_host_statistics * 1872static struct fc_host_statistics *
1609lpfc_get_stats(struct Scsi_Host *shost) 1873lpfc_get_stats(struct Scsi_Host *shost)
1610{ 1874{
1611 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1875 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1612 struct lpfc_sli *psli = &phba->sli; 1876 struct lpfc_hba *phba = vport->phba;
1877 struct lpfc_sli *psli = &phba->sli;
1613 struct fc_host_statistics *hs = &phba->link_stats; 1878 struct fc_host_statistics *hs = &phba->link_stats;
1614 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; 1879 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
1615 LPFC_MBOXQ_t *pmboxq; 1880 LPFC_MBOXQ_t *pmboxq;
@@ -1617,7 +1882,16 @@ lpfc_get_stats(struct Scsi_Host *shost)
1617 unsigned long seconds; 1882 unsigned long seconds;
1618 int rc = 0; 1883 int rc = 0;
1619 1884
1620 if (phba->fc_flag & FC_BLOCK_MGMT_IO) 1885 /*
1886 * prevent udev from issuing mailbox commands until the port is
1887 * configured.
1888 */
1889 if (phba->link_state < LPFC_LINK_DOWN ||
1890 !phba->mbox_mem_pool ||
1891 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
1892 return NULL;
1893
1894 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1621 return NULL; 1895 return NULL;
1622 1896
1623 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1897 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1629,17 +1903,16 @@ lpfc_get_stats(struct Scsi_Host *shost)
1629 pmb->mbxCommand = MBX_READ_STATUS; 1903 pmb->mbxCommand = MBX_READ_STATUS;
1630 pmb->mbxOwner = OWN_HOST; 1904 pmb->mbxOwner = OWN_HOST;
1631 pmboxq->context1 = NULL; 1905 pmboxq->context1 = NULL;
1906 pmboxq->vport = vport;
1632 1907
1633 if ((phba->fc_flag & FC_OFFLINE_MODE) || 1908 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1634 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 1909 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1635 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 1910 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1636 else 1911 else
1637 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 1912 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1638 1913
1639 if (rc != MBX_SUCCESS) { 1914 if (rc != MBX_SUCCESS) {
1640 if (rc == MBX_TIMEOUT) 1915 if (rc != MBX_TIMEOUT)
1641 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1642 else
1643 mempool_free(pmboxq, phba->mbox_mem_pool); 1916 mempool_free(pmboxq, phba->mbox_mem_pool);
1644 return NULL; 1917 return NULL;
1645 } 1918 }
@@ -1655,18 +1928,17 @@ lpfc_get_stats(struct Scsi_Host *shost)
1655 pmb->mbxCommand = MBX_READ_LNK_STAT; 1928 pmb->mbxCommand = MBX_READ_LNK_STAT;
1656 pmb->mbxOwner = OWN_HOST; 1929 pmb->mbxOwner = OWN_HOST;
1657 pmboxq->context1 = NULL; 1930 pmboxq->context1 = NULL;
1931 pmboxq->vport = vport;
1658 1932
1659 if ((phba->fc_flag & FC_OFFLINE_MODE) || 1933 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1660 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 1934 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1661 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 1935 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1662 else 1936 else
1663 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 1937 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1664 1938
1665 if (rc != MBX_SUCCESS) { 1939 if (rc != MBX_SUCCESS) {
1666 if (rc == MBX_TIMEOUT) 1940 if (rc != MBX_TIMEOUT)
1667 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1941 mempool_free(pmboxq, phba->mbox_mem_pool);
1668 else
1669 mempool_free( pmboxq, phba->mbox_mem_pool);
1670 return NULL; 1942 return NULL;
1671 } 1943 }
1672 1944
@@ -1713,14 +1985,15 @@ lpfc_get_stats(struct Scsi_Host *shost)
1713static void 1985static void
1714lpfc_reset_stats(struct Scsi_Host *shost) 1986lpfc_reset_stats(struct Scsi_Host *shost)
1715{ 1987{
1716 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1988 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1717 struct lpfc_sli *psli = &phba->sli; 1989 struct lpfc_hba *phba = vport->phba;
1718 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; 1990 struct lpfc_sli *psli = &phba->sli;
1991 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
1719 LPFC_MBOXQ_t *pmboxq; 1992 LPFC_MBOXQ_t *pmboxq;
1720 MAILBOX_t *pmb; 1993 MAILBOX_t *pmb;
1721 int rc = 0; 1994 int rc = 0;
1722 1995
1723 if (phba->fc_flag & FC_BLOCK_MGMT_IO) 1996 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1724 return; 1997 return;
1725 1998
1726 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1999 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1733,17 +2006,16 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1733 pmb->mbxOwner = OWN_HOST; 2006 pmb->mbxOwner = OWN_HOST;
1734 pmb->un.varWords[0] = 0x1; /* reset request */ 2007 pmb->un.varWords[0] = 0x1; /* reset request */
1735 pmboxq->context1 = NULL; 2008 pmboxq->context1 = NULL;
2009 pmboxq->vport = vport;
1736 2010
1737 if ((phba->fc_flag & FC_OFFLINE_MODE) || 2011 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1738 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 2012 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1739 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2013 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1740 else 2014 else
1741 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 2015 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1742 2016
1743 if (rc != MBX_SUCCESS) { 2017 if (rc != MBX_SUCCESS) {
1744 if (rc == MBX_TIMEOUT) 2018 if (rc != MBX_TIMEOUT)
1745 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1746 else
1747 mempool_free(pmboxq, phba->mbox_mem_pool); 2019 mempool_free(pmboxq, phba->mbox_mem_pool);
1748 return; 2020 return;
1749 } 2021 }
@@ -1752,17 +2024,16 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1752 pmb->mbxCommand = MBX_READ_LNK_STAT; 2024 pmb->mbxCommand = MBX_READ_LNK_STAT;
1753 pmb->mbxOwner = OWN_HOST; 2025 pmb->mbxOwner = OWN_HOST;
1754 pmboxq->context1 = NULL; 2026 pmboxq->context1 = NULL;
2027 pmboxq->vport = vport;
1755 2028
1756 if ((phba->fc_flag & FC_OFFLINE_MODE) || 2029 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1757 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 2030 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1758 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2031 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1759 else 2032 else
1760 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 2033 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1761 2034
1762 if (rc != MBX_SUCCESS) { 2035 if (rc != MBX_SUCCESS) {
1763 if (rc == MBX_TIMEOUT) 2036 if (rc != MBX_TIMEOUT)
1764 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1765 else
1766 mempool_free( pmboxq, phba->mbox_mem_pool); 2037 mempool_free( pmboxq, phba->mbox_mem_pool);
1767 return; 2038 return;
1768 } 2039 }
@@ -1791,13 +2062,13 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1791static struct lpfc_nodelist * 2062static struct lpfc_nodelist *
1792lpfc_get_node_by_target(struct scsi_target *starget) 2063lpfc_get_node_by_target(struct scsi_target *starget)
1793{ 2064{
1794 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2065 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1795 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata; 2066 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1796 struct lpfc_nodelist *ndlp; 2067 struct lpfc_nodelist *ndlp;
1797 2068
1798 spin_lock_irq(shost->host_lock); 2069 spin_lock_irq(shost->host_lock);
1799 /* Search for this, mapped, target ID */ 2070 /* Search for this, mapped, target ID */
1800 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 2071 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1801 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 2072 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1802 starget->id == ndlp->nlp_sid) { 2073 starget->id == ndlp->nlp_sid) {
1803 spin_unlock_irq(shost->host_lock); 2074 spin_unlock_irq(shost->host_lock);
@@ -1887,8 +2158,66 @@ struct fc_function_template lpfc_transport_functions = {
1887 .get_host_fabric_name = lpfc_get_host_fabric_name, 2158 .get_host_fabric_name = lpfc_get_host_fabric_name,
1888 .show_host_fabric_name = 1, 2159 .show_host_fabric_name = 1,
1889 2160
1890 .get_host_symbolic_name = lpfc_get_host_symbolic_name, 2161 /*
1891 .show_host_symbolic_name = 1, 2162 * The LPFC driver treats linkdown handling as target loss events
2163 * so there are no sysfs handlers for link_down_tmo.
2164 */
2165
2166 .get_fc_host_stats = lpfc_get_stats,
2167 .reset_fc_host_stats = lpfc_reset_stats,
2168
2169 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
2170 .show_rport_maxframe_size = 1,
2171 .show_rport_supported_classes = 1,
2172
2173 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
2174 .show_rport_dev_loss_tmo = 1,
2175
2176 .get_starget_port_id = lpfc_get_starget_port_id,
2177 .show_starget_port_id = 1,
2178
2179 .get_starget_node_name = lpfc_get_starget_node_name,
2180 .show_starget_node_name = 1,
2181
2182 .get_starget_port_name = lpfc_get_starget_port_name,
2183 .show_starget_port_name = 1,
2184
2185 .issue_fc_host_lip = lpfc_issue_lip,
2186 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
2187 .terminate_rport_io = lpfc_terminate_rport_io,
2188
2189 .vport_create = lpfc_vport_create,
2190 .vport_delete = lpfc_vport_delete,
2191 .dd_fcvport_size = sizeof(struct lpfc_vport *),
2192};
2193
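
The vport_create/vport_delete entries (and dd_fcvport_size) hook the driver into the FC transport's NPIV machinery, so virtual ports are created through the generic fc_host interface rather than a driver-private one. As an assumed illustration only: the attribute name and the "WWPN:WWNN" text format below come from the scsi_transport_fc user interface of this era and may differ by kernel version, and the WWNs themselves are made up.

/* Hypothetical user-space sketch: request an NPIV vport on host0. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/fc_host/host0/vport_create", "w");

	if (!f) {
		perror("vport_create");
		return 1;
	}
	/* Made-up port name and node name for the new virtual port. */
	fputs("10000000c9123456:20000000c9123456\n", f);
	fclose(f);
	return 0;
}
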
2194struct fc_function_template lpfc_vport_transport_functions = {
2195 /* fixed attributes the driver supports */
2196 .show_host_node_name = 1,
2197 .show_host_port_name = 1,
2198 .show_host_supported_classes = 1,
2199 .show_host_supported_fc4s = 1,
2200 .show_host_supported_speeds = 1,
2201 .show_host_maxframe_size = 1,
2202
2203 /* dynamic attributes the driver supports */
2204 .get_host_port_id = lpfc_get_host_port_id,
2205 .show_host_port_id = 1,
2206
2207 .get_host_port_type = lpfc_get_host_port_type,
2208 .show_host_port_type = 1,
2209
2210 .get_host_port_state = lpfc_get_host_port_state,
2211 .show_host_port_state = 1,
2212
2213 /* active_fc4s is shown but doesn't change (thus no get function) */
2214 .show_host_active_fc4s = 1,
2215
2216 .get_host_speed = lpfc_get_host_speed,
2217 .show_host_speed = 1,
2218
2219 .get_host_fabric_name = lpfc_get_host_fabric_name,
2220 .show_host_fabric_name = 1,
1892 2221
1893 /* 2222 /*
1894 * The LPFC driver treats linkdown handling as target loss events 2223 * The LPFC driver treats linkdown handling as target loss events
@@ -1917,6 +2246,8 @@ struct fc_function_template lpfc_transport_functions = {
1917 .issue_fc_host_lip = lpfc_issue_lip, 2246 .issue_fc_host_lip = lpfc_issue_lip,
1918 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, 2247 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
1919 .terminate_rport_io = lpfc_terminate_rport_io, 2248 .terminate_rport_io = lpfc_terminate_rport_io,
2249
2250 .vport_disable = lpfc_vport_disable,
1920}; 2251};
1921 2252
1922void 2253void
@@ -1939,6 +2270,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1939 lpfc_discovery_threads_init(phba, lpfc_discovery_threads); 2270 lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
1940 lpfc_max_luns_init(phba, lpfc_max_luns); 2271 lpfc_max_luns_init(phba, lpfc_max_luns);
1941 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 2272 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
2273 lpfc_peer_port_login_init(phba, lpfc_peer_port_login);
2274 lpfc_npiv_enable_init(phba, lpfc_npiv_enable);
2275 lpfc_vport_restrict_login_init(phba, lpfc_vport_restrict_login);
1942 lpfc_use_msi_init(phba, lpfc_use_msi); 2276 lpfc_use_msi_init(phba, lpfc_use_msi);
1943 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo); 2277 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
1944 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo); 2278 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b8c2a8862d8c..e19d1a746586 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,92 +23,114 @@ typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
25void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 25void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
26int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, 27int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
27 struct lpfc_dmabuf *mp); 28 struct lpfc_dmabuf *mp);
28void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 29void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
30void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport);
29void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); 31void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
30int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); 32int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
31void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); 33void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
32void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 34void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
33int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *, 35int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
34 uint32_t); 36 LPFC_MBOXQ_t *, uint32_t);
35void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); 37void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
36void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); 38void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
39void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
40void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
37void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 41void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
38 42
39 43void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
40int lpfc_linkdown(struct lpfc_hba *); 44int lpfc_linkdown(struct lpfc_hba *);
41void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 45void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
42 46
43void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 47void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
44void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 48void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
49void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
45void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 50void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
46void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 51void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
47void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 52void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
48void lpfc_dequeue_node(struct lpfc_hba *, struct lpfc_nodelist *); 53void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
49void lpfc_nlp_set_state(struct lpfc_hba *, struct lpfc_nodelist *, int); 54void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
50void lpfc_drop_node(struct lpfc_hba *, struct lpfc_nodelist *); 55void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
51void lpfc_set_disctmo(struct lpfc_hba *); 56void lpfc_set_disctmo(struct lpfc_vport *);
52int lpfc_can_disctmo(struct lpfc_hba *); 57int lpfc_can_disctmo(struct lpfc_vport *);
53int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *); 58int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
59void lpfc_unreg_all_rpis(struct lpfc_vport *);
60void lpfc_unreg_default_rpis(struct lpfc_vport *);
61void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
62
54int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *, 63int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
55 struct lpfc_iocbq *, struct lpfc_nodelist *); 64 struct lpfc_iocbq *, struct lpfc_nodelist *);
56void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t); 65void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
57struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); 66struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
58int lpfc_nlp_put(struct lpfc_nodelist *); 67int lpfc_nlp_put(struct lpfc_nodelist *);
59struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t); 68struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
60void lpfc_disc_list_loopmap(struct lpfc_hba *); 69void lpfc_disc_list_loopmap(struct lpfc_vport *);
61void lpfc_disc_start(struct lpfc_hba *); 70void lpfc_disc_start(struct lpfc_vport *);
62void lpfc_disc_flush_list(struct lpfc_hba *); 71void lpfc_disc_flush_list(struct lpfc_vport *);
72void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
63void lpfc_disc_timeout(unsigned long); 73void lpfc_disc_timeout(unsigned long);
64 74
65struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi); 75struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
66struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi); 76struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
67 77
78void lpfc_worker_wake_up(struct lpfc_hba *);
68int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t); 79int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
69int lpfc_do_work(void *); 80int lpfc_do_work(void *);
70int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *, 81int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
71 uint32_t); 82 uint32_t);
72 83
73int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *, 84void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
85 struct lpfc_nodelist *);
86void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
87int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
74 struct serv_parm *, uint32_t); 88 struct serv_parm *, uint32_t);
75int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp); 89int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
90int lpfc_els_chk_latt(struct lpfc_vport *);
76int lpfc_els_abort_flogi(struct lpfc_hba *); 91int lpfc_els_abort_flogi(struct lpfc_hba *);
77int lpfc_initial_flogi(struct lpfc_hba *); 92int lpfc_initial_flogi(struct lpfc_vport *);
78int lpfc_issue_els_plogi(struct lpfc_hba *, uint32_t, uint8_t); 93int lpfc_initial_fdisc(struct lpfc_vport *);
79int lpfc_issue_els_prli(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t); 94int lpfc_issue_els_fdisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
80int lpfc_issue_els_adisc(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t); 95int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
81int lpfc_issue_els_logo(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t); 96int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
82int lpfc_issue_els_scr(struct lpfc_hba *, uint32_t, uint8_t); 97int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
98int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
99int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
100int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
83int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 101int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
84int lpfc_els_rsp_acc(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, 102int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
103int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
85 struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t); 104 struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t);
86int lpfc_els_rsp_reject(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, 105int lpfc_els_rsp_reject(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
87 struct lpfc_nodelist *); 106 struct lpfc_nodelist *, LPFC_MBOXQ_t *);
88int lpfc_els_rsp_adisc_acc(struct lpfc_hba *, struct lpfc_iocbq *, 107int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *,
89 struct lpfc_nodelist *); 108 struct lpfc_nodelist *);
90int lpfc_els_rsp_prli_acc(struct lpfc_hba *, struct lpfc_iocbq *, 109int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *,
91 struct lpfc_nodelist *); 110 struct lpfc_nodelist *);
92void lpfc_cancel_retry_delay_tmo(struct lpfc_hba *, struct lpfc_nodelist *); 111void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
93void lpfc_els_retry_delay(unsigned long); 112void lpfc_els_retry_delay(unsigned long);
94void lpfc_els_retry_delay_handler(struct lpfc_nodelist *); 113void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
114void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *);
95void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 115void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
96 struct lpfc_iocbq *); 116 struct lpfc_iocbq *);
97int lpfc_els_handle_rscn(struct lpfc_hba *); 117int lpfc_els_handle_rscn(struct lpfc_vport *);
98int lpfc_els_flush_rscn(struct lpfc_hba *); 118void lpfc_els_flush_rscn(struct lpfc_vport *);
99int lpfc_rscn_payload_check(struct lpfc_hba *, uint32_t); 119int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t);
100void lpfc_els_flush_cmd(struct lpfc_hba *); 120void lpfc_els_flush_cmd(struct lpfc_vport *);
101int lpfc_els_disc_adisc(struct lpfc_hba *); 121int lpfc_els_disc_adisc(struct lpfc_vport *);
102int lpfc_els_disc_plogi(struct lpfc_hba *); 122int lpfc_els_disc_plogi(struct lpfc_vport *);
103void lpfc_els_timeout(unsigned long); 123void lpfc_els_timeout(unsigned long);
104void lpfc_els_timeout_handler(struct lpfc_hba *); 124void lpfc_els_timeout_handler(struct lpfc_vport *);
125void lpfc_hb_timeout(unsigned long);
126void lpfc_hb_timeout_handler(struct lpfc_hba *);
105 127
106void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 128void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
107 struct lpfc_iocbq *); 129 struct lpfc_iocbq *);
108int lpfc_ns_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int); 130int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
109int lpfc_fdmi_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int); 131int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
110void lpfc_fdmi_tmo(unsigned long); 132void lpfc_fdmi_tmo(unsigned long);
111void lpfc_fdmi_tmo_handler(struct lpfc_hba *); 133void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport);
112 134
113int lpfc_config_port_prep(struct lpfc_hba *); 135int lpfc_config_port_prep(struct lpfc_hba *);
114int lpfc_config_port_post(struct lpfc_hba *); 136int lpfc_config_port_post(struct lpfc_hba *);
@@ -136,16 +158,23 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
136void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 158void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
137void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 159void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
138LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); 160LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
161void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
139int lpfc_mbox_tmo_val(struct lpfc_hba *, int); 162int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
140 163
164void lpfc_config_hbq(struct lpfc_hba *, struct lpfc_hbq_init *, uint32_t ,
165 LPFC_MBOXQ_t *);
166struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t);
167
141int lpfc_mem_alloc(struct lpfc_hba *); 168int lpfc_mem_alloc(struct lpfc_hba *);
142void lpfc_mem_free(struct lpfc_hba *); 169void lpfc_mem_free(struct lpfc_hba *);
170void lpfc_stop_vport_timers(struct lpfc_vport *);
143 171
144void lpfc_poll_timeout(unsigned long ptr); 172void lpfc_poll_timeout(unsigned long ptr);
145void lpfc_poll_start_timer(struct lpfc_hba * phba); 173void lpfc_poll_start_timer(struct lpfc_hba * phba);
146void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba); 174void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
147struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); 175struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
148void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); 176void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
177void __lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
149uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); 178uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
150 179
151void lpfc_reset_barrier(struct lpfc_hba * phba); 180void lpfc_reset_barrier(struct lpfc_hba * phba);
@@ -154,6 +183,7 @@ int lpfc_sli_brdkill(struct lpfc_hba *);
154int lpfc_sli_brdreset(struct lpfc_hba *); 183int lpfc_sli_brdreset(struct lpfc_hba *);
155int lpfc_sli_brdrestart(struct lpfc_hba *); 184int lpfc_sli_brdrestart(struct lpfc_hba *);
156int lpfc_sli_hba_setup(struct lpfc_hba *); 185int lpfc_sli_hba_setup(struct lpfc_hba *);
186int lpfc_sli_host_down(struct lpfc_vport *);
157int lpfc_sli_hba_down(struct lpfc_hba *); 187int lpfc_sli_hba_down(struct lpfc_hba *);
158int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 188int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
159int lpfc_sli_handle_mb_event(struct lpfc_hba *); 189int lpfc_sli_handle_mb_event(struct lpfc_hba *);
@@ -164,27 +194,36 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
164int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, 194int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
165 struct lpfc_iocbq *, uint32_t); 195 struct lpfc_iocbq *, uint32_t);
166void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 196void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
167int lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 197void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
168int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 198int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
169 struct lpfc_dmabuf *); 199 struct lpfc_dmabuf *);
170struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, 200struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
171 struct lpfc_sli_ring *, 201 struct lpfc_sli_ring *,
172 dma_addr_t); 202 dma_addr_t);
203int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t);
204int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
205void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
206struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t);
207int lpfc_sli_hbq_size(void);
173int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *, 208int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
174 struct lpfc_iocbq *); 209 struct lpfc_iocbq *);
175int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t, 210int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
176 uint64_t, lpfc_ctx_cmd); 211 uint64_t, lpfc_ctx_cmd);
177int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t, 212int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
178 uint64_t, uint32_t, lpfc_ctx_cmd); 213 uint64_t, uint32_t, lpfc_ctx_cmd);
179 214
180void lpfc_mbox_timeout(unsigned long); 215void lpfc_mbox_timeout(unsigned long);
181void lpfc_mbox_timeout_handler(struct lpfc_hba *); 216void lpfc_mbox_timeout_handler(struct lpfc_hba *);
182 217
183struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba *, uint32_t); 218struct lpfc_nodelist *__lpfc_find_node(struct lpfc_vport *, node_filter,
184struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_hba *, struct lpfc_name *); 219 void *);
220struct lpfc_nodelist *lpfc_find_node(struct lpfc_vport *, node_filter, void *);
221struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
222struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
223 struct lpfc_name *);
185 224
186int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 225int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
187 uint32_t timeout); 226 uint32_t timeout);
188 227
189int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, 228int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
190 struct lpfc_sli_ring * pring, 229 struct lpfc_sli_ring * pring,
@@ -195,25 +234,56 @@ void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
195 struct lpfc_iocbq * cmdiocb, 234 struct lpfc_iocbq * cmdiocb,
196 struct lpfc_iocbq * rspiocb); 235 struct lpfc_iocbq * rspiocb);
197 236
237void *lpfc_hbq_alloc(struct lpfc_hba *, int, dma_addr_t *);
238void lpfc_hbq_free(struct lpfc_hba *, void *, dma_addr_t);
239void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
240
198void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *); 241void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
242void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
199void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); 243void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
200 244
245void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
201/* Function prototypes. */ 246/* Function prototypes. */
202const char* lpfc_info(struct Scsi_Host *); 247const char* lpfc_info(struct Scsi_Host *);
203void lpfc_scan_start(struct Scsi_Host *);
204int lpfc_scan_finished(struct Scsi_Host *, unsigned long); 248int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
205 249
206void lpfc_get_cfgparam(struct lpfc_hba *); 250void lpfc_get_cfgparam(struct lpfc_hba *);
207int lpfc_alloc_sysfs_attr(struct lpfc_hba *); 251int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
208void lpfc_free_sysfs_attr(struct lpfc_hba *); 252void lpfc_free_sysfs_attr(struct lpfc_vport *);
209extern struct class_device_attribute *lpfc_host_attrs[]; 253extern struct class_device_attribute *lpfc_hba_attrs[];
210extern struct scsi_host_template lpfc_template; 254extern struct scsi_host_template lpfc_template;
211extern struct fc_function_template lpfc_transport_functions; 255extern struct fc_function_template lpfc_transport_functions;
256extern struct fc_function_template lpfc_vport_transport_functions;
257extern int lpfc_sli_mode;
212 258
213void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp); 259int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
214void lpfc_terminate_rport_io(struct fc_rport *); 260void lpfc_terminate_rport_io(struct fc_rport *);
215void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); 261void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
216 262
263struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct fc_vport *);
264int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
265void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
266void destroy_port(struct lpfc_vport *);
267int lpfc_get_instance(void);
268void lpfc_host_attrib_init(struct Scsi_Host *);
269
270extern void lpfc_debugfs_initialize(struct lpfc_vport *);
271extern void lpfc_debugfs_terminate(struct lpfc_vport *);
272extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
273 uint32_t, uint32_t);
274
275/* Interface exported by fabric iocb scheduler */
276int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
277void lpfc_fabric_abort_vport(struct lpfc_vport *);
278void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
279void lpfc_fabric_abort_hba(struct lpfc_hba *);
280void lpfc_fabric_abort_flogi(struct lpfc_hba *);
281void lpfc_fabric_block_timeout(unsigned long);
282void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
283void lpfc_adjust_queue_depth(struct lpfc_hba *);
284void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
285void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
286
217#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 287#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
218#define HBA_EVENT_RSCN 5 288#define HBA_EVENT_RSCN 5
219#define HBA_EVENT_LINK_UP 2 289#define HBA_EVENT_LINK_UP 2
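
The new __lpfc_find_node()/lpfc_find_node() prototypes above, together with the node_filter typedef at the top of this header, describe a callback-driven search over a vport's fc_nodes list. The sketch below illustrates the assumed contract (the callback returns nonzero for a matching ndlp and the first match is returned); the filter function itself is hypothetical and not part of the driver.

/* Hypothetical node_filter: match a node by its mapped SCSI target id. */
static int
lpfc_filter_by_scsi_id(struct lpfc_nodelist *ndlp, void *param)
{
	uint32_t tgt_id = *(uint32_t *) param;

	return (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
		ndlp->nlp_sid == tgt_id);
}

/* A caller searches a single vport's node list with it, e.g.: */
	uint32_t tgt_id = 2;			/* example target id */
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_find_node(vport, lpfc_filter_by_scsi_id, &tgt_id);
	if (ndlp)
		;	/* found the mapped node for this target */
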
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 34a9e3bb2614..ae9d6f385a6c 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -40,6 +40,8 @@
40#include "lpfc_logmsg.h" 40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h" 41#include "lpfc_crtn.h"
42#include "lpfc_version.h" 42#include "lpfc_version.h"
43#include "lpfc_vport.h"
44#include "lpfc_debugfs.h"
43 45
44#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver 46#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver
45 * incapable of reporting */ 47 * incapable of reporting */
@@ -58,25 +60,69 @@ static char *lpfc_release_version = LPFC_DRIVER_VERSION;
58/* 60/*
59 * lpfc_ct_unsol_event 61 * lpfc_ct_unsol_event
60 */ 62 */
63static void
64lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
65 struct lpfc_dmabuf *mp, uint32_t size)
66{
67 if (!mp) {
 68 printk(KERN_ERR "%s (%d): Unsolicited CT, no buffer, "
69 "piocbq = %p, status = x%x, mp = %p, size = %d\n",
70 __FUNCTION__, __LINE__,
71 piocbq, piocbq->iocb.ulpStatus, mp, size);
72 }
73
 74 printk(KERN_ERR "%s (%d): Ignoring unsolicited CT piocbq = %p, "
75 "buffer = %p, size = %d, status = x%x\n",
76 __FUNCTION__, __LINE__,
77 piocbq, mp, size,
78 piocbq->iocb.ulpStatus);
79
80}
81
82static void
83lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
84 struct lpfc_dmabuf *mp, uint32_t size)
85{
86 if (!mp) {
 87 printk(KERN_ERR "%s (%d): Unsolicited CT, no "
88 "HBQ buffer, piocbq = %p, status = x%x\n",
89 __FUNCTION__, __LINE__,
90 piocbq, piocbq->iocb.ulpStatus);
91 } else {
92 lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
 92 lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
 93 printk(KERN_ERR "%s (%d): Ignoring unsolicited CT "
94 "piocbq = %p, buffer = %p, size = %d, "
95 "status = x%x\n",
96 __FUNCTION__, __LINE__,
97 piocbq, mp, size, piocbq->iocb.ulpStatus);
98 }
99}
100
61void 101void
62lpfc_ct_unsol_event(struct lpfc_hba * phba, 102lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
63 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq) 103 struct lpfc_iocbq *piocbq)
64{ 104{
65 105
66 struct lpfc_iocbq *next_piocbq; 106 struct lpfc_dmabuf *mp = NULL;
67 struct lpfc_dmabuf *pmbuf = NULL;
68 struct lpfc_dmabuf *matp, *next_matp;
69 uint32_t ctx = 0, size = 0, cnt = 0;
70 IOCB_t *icmd = &piocbq->iocb; 107 IOCB_t *icmd = &piocbq->iocb;
71 IOCB_t *save_icmd = icmd; 108 int i;
72 int i, go_exit = 0; 109 struct lpfc_iocbq *iocbq;
73 struct list_head head; 110 dma_addr_t paddr;
111 uint32_t size;
112 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
113 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
114
115 piocbq->context2 = NULL;
116 piocbq->context3 = NULL;
74 117
75 if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && 118 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
119 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
120 } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
76 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) { 121 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
77 /* Not enough posted buffers; Try posting more buffers */ 122 /* Not enough posted buffers; Try posting more buffers */
78 phba->fc_stat.NoRcvBuf++; 123 phba->fc_stat.NoRcvBuf++;
79 lpfc_post_buffer(phba, pring, 0, 1); 124 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
125 lpfc_post_buffer(phba, pring, 0, 1);
80 return; 126 return;
81 } 127 }
82 128
@@ -86,66 +132,56 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
86 if (icmd->ulpBdeCount == 0) 132 if (icmd->ulpBdeCount == 0)
87 return; 133 return;
88 134
89 INIT_LIST_HEAD(&head); 135 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
90 list_add_tail(&head, &piocbq->list); 136 list_for_each_entry(iocbq, &piocbq->list, list) {
91 137 icmd = &iocbq->iocb;
92 list_for_each_entry_safe(piocbq, next_piocbq, &head, list) { 138 if (icmd->ulpBdeCount == 0) {
 93 icmd = &piocbq->iocb; 139 printk(KERN_ERR "%s (%d): Unsolicited CT, no "
94 if (ctx == 0) 140 "BDE, iocbq = %p, status = x%x\n",
95 ctx = (uint32_t) (icmd->ulpContext); 141 __FUNCTION__, __LINE__,
96 if (icmd->ulpBdeCount == 0) 142 iocbq, iocbq->iocb.ulpStatus);
97 continue; 143 continue;
98
99 for (i = 0; i < icmd->ulpBdeCount; i++) {
100 matp = lpfc_sli_ringpostbuf_get(phba, pring,
101 getPaddr(icmd->un.
102 cont64[i].
103 addrHigh,
104 icmd->un.
105 cont64[i].
106 addrLow));
107 if (!matp) {
108 /* Insert lpfc log message here */
109 lpfc_post_buffer(phba, pring, cnt, 1);
110 go_exit = 1;
111 goto ct_unsol_event_exit_piocbq;
112 } 144 }
113 145
114 /* Typically for Unsolicited CT requests */ 146 size = icmd->un.cont64[0].tus.f.bdeSize;
115 if (!pmbuf) { 147 lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size);
116 pmbuf = matp; 148 lpfc_in_buf_free(phba, bdeBuf1);
117 INIT_LIST_HEAD(&pmbuf->list); 149 if (icmd->ulpBdeCount == 2) {
118 } else 150 lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2,
119 list_add_tail(&matp->list, &pmbuf->list); 151 size);
120 152 lpfc_in_buf_free(phba, bdeBuf2);
121 size += icmd->un.cont64[i].tus.f.bdeSize; 153 }
122 cnt++;
123 } 154 }
155 } else {
156 struct lpfc_iocbq *next;
157
158 list_for_each_entry_safe(iocbq, next, &piocbq->list, list) {
159 icmd = &iocbq->iocb;
160 if (icmd->ulpBdeCount == 0) {
 161 printk(KERN_ERR "%s (%d): Unsolicited CT, no "
162 "BDE, iocbq = %p, status = x%x\n",
163 __FUNCTION__, __LINE__,
164 iocbq, iocbq->iocb.ulpStatus);
165 continue;
166 }
124 167
125 icmd->ulpBdeCount = 0; 168 for (i = 0; i < icmd->ulpBdeCount; i++) {
126 } 169 paddr = getPaddr(icmd->un.cont64[i].addrHigh,
127 170 icmd->un.cont64[i].addrLow);
128 lpfc_post_buffer(phba, pring, cnt, 1); 171 mp = lpfc_sli_ringpostbuf_get(phba, pring,
129 if (save_icmd->ulpStatus) { 172 paddr);
130 go_exit = 1; 173 size = icmd->un.cont64[i].tus.f.bdeSize;
131 } 174 lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
132 175 lpfc_in_buf_free(phba, mp);
133ct_unsol_event_exit_piocbq: 176 }
134 list_del(&head); 177 list_del(&iocbq->list);
135 if (pmbuf) { 178 lpfc_sli_release_iocbq(phba, iocbq);
136 list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
137 lpfc_mbuf_free(phba, matp->virt, matp->phys);
138 list_del(&matp->list);
139 kfree(matp);
140 } 179 }
141 lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys);
142 kfree(pmbuf);
143 } 180 }
144 return;
145} 181}
146 182
147static void 183static void
148lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist) 184lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
149{ 185{
150 struct lpfc_dmabuf *mlast, *next_mlast; 186 struct lpfc_dmabuf *mlast, *next_mlast;
151 187
@@ -160,7 +196,7 @@ lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
160} 196}
161 197
162static struct lpfc_dmabuf * 198static struct lpfc_dmabuf *
163lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl, 199lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
164 uint32_t size, int *entries) 200 uint32_t size, int *entries)
165{ 201{
166 struct lpfc_dmabuf *mlist = NULL; 202 struct lpfc_dmabuf *mlist = NULL;
@@ -181,7 +217,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
181 217
182 INIT_LIST_HEAD(&mp->list); 218 INIT_LIST_HEAD(&mp->list);
183 219
184 if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT)) 220 if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) ||
221 cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID))
185 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); 222 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
186 else 223 else
187 mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys)); 224 mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
@@ -201,8 +238,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
201 238
202 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 239 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
203 /* build buffer ptr list for IOCB */ 240 /* build buffer ptr list for IOCB */
204 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); 241 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
205 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); 242 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
206 bpl->tus.f.bdeSize = (uint16_t) cnt; 243 bpl->tus.f.bdeSize = (uint16_t) cnt;
207 bpl->tus.w = le32_to_cpu(bpl->tus.w); 244 bpl->tus.w = le32_to_cpu(bpl->tus.w);
208 bpl++; 245 bpl++;
@@ -215,24 +252,49 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
215 return mlist; 252 return mlist;
216} 253}
217 254
255int
256lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
257{
258 struct lpfc_dmabuf *buf_ptr;
259
260 if (ctiocb->context1) {
261 buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
262 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
263 kfree(buf_ptr);
264 ctiocb->context1 = NULL;
265 }
266 if (ctiocb->context2) {
267 lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
268 ctiocb->context2 = NULL;
269 }
270
271 if (ctiocb->context3) {
272 buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
273 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
274 kfree(buf_ptr);
 275 ctiocb->context3 = NULL;
276 }
277 lpfc_sli_release_iocbq(phba, ctiocb);
278 return 0;
279}
280
218static int 281static int
219lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp, 282lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
220 struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp, 283 struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
221 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 284 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
222 struct lpfc_iocbq *), 285 struct lpfc_iocbq *),
223 struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry, 286 struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
224 uint32_t tmo) 287 uint32_t tmo, uint8_t retry)
225{ 288{
226 289 struct lpfc_hba *phba = vport->phba;
227 struct lpfc_sli *psli = &phba->sli; 290 struct lpfc_sli *psli = &phba->sli;
228 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 291 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
229 IOCB_t *icmd; 292 IOCB_t *icmd;
230 struct lpfc_iocbq *geniocb; 293 struct lpfc_iocbq *geniocb;
294 int rc;
231 295
232 /* Allocate buffer for command iocb */ 296 /* Allocate buffer for command iocb */
233 spin_lock_irq(phba->host->host_lock);
234 geniocb = lpfc_sli_get_iocbq(phba); 297 geniocb = lpfc_sli_get_iocbq(phba);
235 spin_unlock_irq(phba->host->host_lock);
236 298
237 if (geniocb == NULL) 299 if (geniocb == NULL)
238 return 1; 300 return 1;
@@ -272,31 +334,40 @@ lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
272 icmd->ulpClass = CLASS3; 334 icmd->ulpClass = CLASS3;
273 icmd->ulpContext = ndlp->nlp_rpi; 335 icmd->ulpContext = ndlp->nlp_rpi;
274 336
337 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
338 /* For GEN_REQUEST64_CR, use the RPI */
339 icmd->ulpCt_h = 0;
340 icmd->ulpCt_l = 0;
341 }
342
275 /* Issue GEN REQ IOCB for NPORT <did> */ 343 /* Issue GEN REQ IOCB for NPORT <did> */
276 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 344 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
277 "%d:0119 Issue GEN REQ IOCB for NPORT x%x " 345 "%d (%d):0119 Issue GEN REQ IOCB to NPORT x%x "
278 "Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5], 346 "Data: x%x x%x\n", phba->brd_no, vport->vpi,
279 icmd->ulpIoTag, phba->hba_state); 347 ndlp->nlp_DID, icmd->ulpIoTag,
348 vport->port_state);
280 geniocb->iocb_cmpl = cmpl; 349 geniocb->iocb_cmpl = cmpl;
281 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; 350 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
282 spin_lock_irq(phba->host->host_lock); 351 geniocb->vport = vport;
283 if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) { 352 geniocb->retry = retry;
353 rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
354
355 if (rc == IOCB_ERROR) {
284 lpfc_sli_release_iocbq(phba, geniocb); 356 lpfc_sli_release_iocbq(phba, geniocb);
285 spin_unlock_irq(phba->host->host_lock);
286 return 1; 357 return 1;
287 } 358 }
288 spin_unlock_irq(phba->host->host_lock);
289 359
290 return 0; 360 return 0;
291} 361}
292 362
293static int 363static int
294lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp, 364lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
295 struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp, 365 struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
296 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 366 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
297 struct lpfc_iocbq *), 367 struct lpfc_iocbq *),
298 uint32_t rsp_size) 368 uint32_t rsp_size, uint8_t retry)
299{ 369{
370 struct lpfc_hba *phba = vport->phba;
300 struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt; 371 struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
301 struct lpfc_dmabuf *outmp; 372 struct lpfc_dmabuf *outmp;
302 int cnt = 0, status; 373 int cnt = 0, status;
@@ -310,8 +381,8 @@ lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
310 if (!outmp) 381 if (!outmp)
311 return -ENOMEM; 382 return -ENOMEM;
312 383
313 status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0, 384 status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
314 cnt+1, 0); 385 cnt+1, 0, retry);
315 if (status) { 386 if (status) {
316 lpfc_free_ct_rsp(phba, outmp); 387 lpfc_free_ct_rsp(phba, outmp);
317 return -ENOMEM; 388 return -ENOMEM;
@@ -319,20 +390,35 @@ lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
319 return 0; 390 return 0;
320} 391}
321 392
393static struct lpfc_vport *
394lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
395
396 struct lpfc_vport *vport_curr;
397
398 list_for_each_entry(vport_curr, &phba->port_list, listentry) {
399 if ((vport_curr->fc_myDID) &&
400 (vport_curr->fc_myDID == did))
401 return vport_curr;
402 }
403
404 return NULL;
405}
406
322static int 407static int
323lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size) 408lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
324{ 409{
410 struct lpfc_hba *phba = vport->phba;
325 struct lpfc_sli_ct_request *Response = 411 struct lpfc_sli_ct_request *Response =
326 (struct lpfc_sli_ct_request *) mp->virt; 412 (struct lpfc_sli_ct_request *) mp->virt;
327 struct lpfc_nodelist *ndlp = NULL; 413 struct lpfc_nodelist *ndlp = NULL;
328 struct lpfc_dmabuf *mlast, *next_mp; 414 struct lpfc_dmabuf *mlast, *next_mp;
329 uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; 415 uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
330 uint32_t Did; 416 uint32_t Did, CTentry;
331 uint32_t CTentry;
332 int Cnt; 417 int Cnt;
333 struct list_head head; 418 struct list_head head;
334 419
335 lpfc_set_disctmo(phba); 420 lpfc_set_disctmo(vport);
421 vport->num_disc_nodes = 0;
336 422
337 423
338 list_add_tail(&head, &mp->list); 424 list_add_tail(&head, &mp->list);
@@ -350,39 +436,96 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
350 436
351 /* Loop through entire NameServer list of DIDs */ 437 /* Loop through entire NameServer list of DIDs */
352 while (Cnt >= sizeof (uint32_t)) { 438 while (Cnt >= sizeof (uint32_t)) {
353
354 /* Get next DID from NameServer List */ 439 /* Get next DID from NameServer List */
355 CTentry = *ctptr++; 440 CTentry = *ctptr++;
356 Did = ((be32_to_cpu(CTentry)) & Mask_DID); 441 Did = ((be32_to_cpu(CTentry)) & Mask_DID);
357 442
358 ndlp = NULL; 443 ndlp = NULL;
359 if (Did != phba->fc_myDID) { 444
360 /* Check for rscn processing or not */ 445 /*
361 ndlp = lpfc_setup_disc_node(phba, Did); 446 * Check for rscn processing or not
362 } 447 * To conserve rpi's, filter out addresses for other
363 /* Mark all node table entries that are in the 448 * vports on the same physical HBAs.
364 Nameserver */ 449 */
365 if (ndlp) { 450 if ((Did != vport->fc_myDID) &&
366 /* NameServer Rsp */ 451 ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
367 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 452 phba->cfg_peer_port_login)) {
368 "%d:0238 Process x%x NameServer" 453 if ((vport->port_type != LPFC_NPIV_PORT) ||
369 " Rsp Data: x%x x%x x%x\n", 454 (vport->fc_flag & FC_RFF_NOT_SUPPORTED) ||
370 phba->brd_no, 455 (!phba->cfg_vport_restrict_login)) {
456 ndlp = lpfc_setup_disc_node(vport, Did);
457 if (ndlp) {
458 lpfc_debugfs_disc_trc(vport,
459 LPFC_DISC_TRC_CT,
460 "Parse GID_FTrsp: "
461 "did:x%x flg:x%x x%x",
371 Did, ndlp->nlp_flag, 462 Did, ndlp->nlp_flag,
372 phba->fc_flag, 463 vport->fc_flag);
373 phba->fc_rscn_id_cnt); 464
374 } else { 465 lpfc_printf_log(phba, KERN_INFO,
375 /* NameServer Rsp */ 466 LOG_DISCOVERY,
376 lpfc_printf_log(phba, 467 "%d (%d):0238 Process "
377 KERN_INFO, 468 "x%x NameServer Rsp"
378 LOG_DISCOVERY, 469 "Data: x%x x%x x%x\n",
379 "%d:0239 Skip x%x NameServer " 470 phba->brd_no,
380 "Rsp Data: x%x x%x x%x\n", 471 vport->vpi, Did,
381 phba->brd_no, 472 ndlp->nlp_flag,
382 Did, Size, phba->fc_flag, 473 vport->fc_flag,
383 phba->fc_rscn_id_cnt); 474 vport->fc_rscn_id_cnt);
475 } else {
476 lpfc_debugfs_disc_trc(vport,
477 LPFC_DISC_TRC_CT,
478 "Skip1 GID_FTrsp: "
479 "did:x%x flg:x%x cnt:%d",
480 Did, vport->fc_flag,
481 vport->fc_rscn_id_cnt);
482
483 lpfc_printf_log(phba, KERN_INFO,
484 LOG_DISCOVERY,
485 "%d (%d):0239 Skip x%x "
486 "NameServer Rsp Data: "
487 "x%x x%x\n",
488 phba->brd_no,
489 vport->vpi, Did,
490 vport->fc_flag,
491 vport->fc_rscn_id_cnt);
492 }
493
494 } else {
495 if (!(vport->fc_flag & FC_RSCN_MODE) ||
496 (lpfc_rscn_payload_check(vport, Did))) {
497 lpfc_debugfs_disc_trc(vport,
498 LPFC_DISC_TRC_CT,
499 "Query GID_FTrsp: "
500 "did:x%x flg:x%x cnt:%d",
501 Did, vport->fc_flag,
502 vport->fc_rscn_id_cnt);
503
504 if (lpfc_ns_cmd(vport,
505 SLI_CTNS_GFF_ID,
506 0, Did) == 0)
507 vport->num_disc_nodes++;
508 }
509 else {
510 lpfc_debugfs_disc_trc(vport,
511 LPFC_DISC_TRC_CT,
512 "Skip2 GID_FTrsp: "
513 "did:x%x flg:x%x cnt:%d",
514 Did, vport->fc_flag,
515 vport->fc_rscn_id_cnt);
516
517 lpfc_printf_log(phba, KERN_INFO,
518 LOG_DISCOVERY,
519 "%d (%d):0245 Skip x%x "
520 "NameServer Rsp Data: "
521 "x%x x%x\n",
522 phba->brd_no,
523 vport->vpi, Did,
524 vport->fc_flag,
525 vport->fc_rscn_id_cnt);
526 }
527 }
384 } 528 }
385
386 if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY))) 529 if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
387 goto nsout1; 530 goto nsout1;
388 Cnt -= sizeof (uint32_t); 531 Cnt -= sizeof (uint32_t);
@@ -393,190 +536,369 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
393 536
394nsout1: 537nsout1:
395 list_del(&head); 538 list_del(&head);
396
397 /*
398 * The driver has cycled through all Nports in the RSCN payload.
399 * Complete the handling by cleaning up and marking the
400 * current driver state.
401 */
402 if (phba->hba_state == LPFC_HBA_READY) {
403 lpfc_els_flush_rscn(phba);
404 spin_lock_irq(phba->host->host_lock);
405 phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
406 spin_unlock_irq(phba->host->host_lock);
407 }
408 return 0; 539 return 0;
409} 540}
410 541
411
412
413
414static void 542static void
415lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 543lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
416 struct lpfc_iocbq * rspiocb) 544 struct lpfc_iocbq *rspiocb)
417{ 545{
546 struct lpfc_vport *vport = cmdiocb->vport;
547 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
418 IOCB_t *irsp; 548 IOCB_t *irsp;
419 struct lpfc_sli *psli;
420 struct lpfc_dmabuf *bmp; 549 struct lpfc_dmabuf *bmp;
421 struct lpfc_dmabuf *inp;
422 struct lpfc_dmabuf *outp; 550 struct lpfc_dmabuf *outp;
423 struct lpfc_nodelist *ndlp;
424 struct lpfc_sli_ct_request *CTrsp; 551 struct lpfc_sli_ct_request *CTrsp;
552 int rc;
425 553
426 psli = &phba->sli;
427 /* we pass cmdiocb to state machine which needs rspiocb as well */ 554 /* we pass cmdiocb to state machine which needs rspiocb as well */
428 cmdiocb->context_un.rsp_iocb = rspiocb; 555 cmdiocb->context_un.rsp_iocb = rspiocb;
429 556
430 inp = (struct lpfc_dmabuf *) cmdiocb->context1;
431 outp = (struct lpfc_dmabuf *) cmdiocb->context2; 557 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
432 bmp = (struct lpfc_dmabuf *) cmdiocb->context3; 558 bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
433
434 irsp = &rspiocb->iocb; 559 irsp = &rspiocb->iocb;
435 if (irsp->ulpStatus) {
436 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
437 ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
438 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
439 goto out;
440 }
441 560
561 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
562 "GID_FT cmpl: status:x%x/x%x rtry:%d",
563 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
564
565 /* Don't bother processing response if vport is being torn down. */
566 if (vport->load_flag & FC_UNLOADING)
567 goto out;
568
569
570 if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) {
571 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
572 "%d (%d):0216 Link event during NS query\n",
573 phba->brd_no, vport->vpi);
574 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
575 goto out;
576 }
577
578 if (irsp->ulpStatus) {
442 /* Check for retry */ 579 /* Check for retry */
443 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) { 580 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
444 phba->fc_ns_retry++; 581 if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
582 (irsp->un.ulpWord[4] != IOERR_NO_RESOURCES))
583 vport->fc_ns_retry++;
445 /* CT command is being retried */ 584 /* CT command is being retried */
446 ndlp = lpfc_findnode_did(phba, NameServer_DID); 585 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
447 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 586 vport->fc_ns_retry, 0);
448 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 587 if (rc == 0)
449 0) { 588 goto out;
450 goto out;
451 }
452 }
453 } 589 }
590 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
591 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
592 "%d (%d):0257 GID_FT Query error: 0x%x 0x%x\n",
593 phba->brd_no, vport->vpi, irsp->ulpStatus,
594 vport->fc_ns_retry);
454 } else { 595 } else {
455 /* Good status, continue checking */ 596 /* Good status, continue checking */
456 CTrsp = (struct lpfc_sli_ct_request *) outp->virt; 597 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
457 if (CTrsp->CommandResponse.bits.CmdRsp == 598 if (CTrsp->CommandResponse.bits.CmdRsp ==
458 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { 599 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
459 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 600 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
460 "%d:0208 NameServer Rsp " 601 "%d (%d):0208 NameServer Rsp "
461 "Data: x%x\n", 602 "Data: x%x\n",
462 phba->brd_no, 603 phba->brd_no, vport->vpi,
463 phba->fc_flag); 604 vport->fc_flag);
464 lpfc_ns_rsp(phba, outp, 605 lpfc_ns_rsp(vport, outp,
465 (uint32_t) (irsp->un.genreq64.bdl.bdeSize)); 606 (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
466 } else if (CTrsp->CommandResponse.bits.CmdRsp == 607 } else if (CTrsp->CommandResponse.bits.CmdRsp ==
467 be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { 608 be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
468 /* NameServer Rsp Error */ 609 /* NameServer Rsp Error */
469 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 610 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
470 "%d:0240 NameServer Rsp Error " 611 "%d (%d):0240 NameServer Rsp Error "
471 "Data: x%x x%x x%x x%x\n", 612 "Data: x%x x%x x%x x%x\n",
472 phba->brd_no, 613 phba->brd_no, vport->vpi,
473 CTrsp->CommandResponse.bits.CmdRsp, 614 CTrsp->CommandResponse.bits.CmdRsp,
474 (uint32_t) CTrsp->ReasonCode, 615 (uint32_t) CTrsp->ReasonCode,
475 (uint32_t) CTrsp->Explanation, 616 (uint32_t) CTrsp->Explanation,
476 phba->fc_flag); 617 vport->fc_flag);
618
619 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
620 "GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x",
621 (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
622 (uint32_t) CTrsp->ReasonCode,
623 (uint32_t) CTrsp->Explanation);
624
477 } else { 625 } else {
478 /* NameServer Rsp Error */ 626 /* NameServer Rsp Error */
479 lpfc_printf_log(phba, 627 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
480 KERN_INFO, 628 "%d (%d):0241 NameServer Rsp Error "
481 LOG_DISCOVERY,
482 "%d:0241 NameServer Rsp Error "
483 "Data: x%x x%x x%x x%x\n", 629 "Data: x%x x%x x%x x%x\n",
484 phba->brd_no, 630 phba->brd_no, vport->vpi,
485 CTrsp->CommandResponse.bits.CmdRsp, 631 CTrsp->CommandResponse.bits.CmdRsp,
486 (uint32_t) CTrsp->ReasonCode, 632 (uint32_t) CTrsp->ReasonCode,
487 (uint32_t) CTrsp->Explanation, 633 (uint32_t) CTrsp->Explanation,
488 phba->fc_flag); 634 vport->fc_flag);
635
636 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
637 "GID_FT rsp err2 cmd:x%x rsn:x%x exp:x%x",
638 (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
639 (uint32_t) CTrsp->ReasonCode,
640 (uint32_t) CTrsp->Explanation);
489 } 641 }
490 } 642 }
491 /* Link up / RSCN discovery */ 643 /* Link up / RSCN discovery */
492 lpfc_disc_start(phba); 644 if (vport->num_disc_nodes == 0) {
645 /*
646 * The driver has cycled through all Nports in the RSCN payload.
647 * Complete the handling by cleaning up and marking the
648 * current driver state.
649 */
650 if (vport->port_state >= LPFC_DISC_AUTH) {
651 if (vport->fc_flag & FC_RSCN_MODE) {
652 lpfc_els_flush_rscn(vport);
653 spin_lock_irq(shost->host_lock);
654 vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
655 spin_unlock_irq(shost->host_lock);
656 }
657 else
658 lpfc_els_flush_rscn(vport);
659 }
660
661 lpfc_disc_start(vport);
662 }
493out: 663out:
494 lpfc_free_ct_rsp(phba, outp); 664 lpfc_ct_free_iocb(phba, cmdiocb);
495 lpfc_mbuf_free(phba, inp->virt, inp->phys);
496 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
497 kfree(inp);
498 kfree(bmp);
499 spin_lock_irq(phba->host->host_lock);
500 lpfc_sli_release_iocbq(phba, cmdiocb);
501 spin_unlock_irq(phba->host->host_lock);
502 return; 665 return;
503} 666}
504 667
668void
669lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
670 struct lpfc_iocbq *rspiocb)
671{
672 struct lpfc_vport *vport = cmdiocb->vport;
673 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
674 IOCB_t *irsp = &rspiocb->iocb;
675 struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
676 struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
677 struct lpfc_sli_ct_request *CTrsp;
678 int did;
679 uint8_t fbits;
680 struct lpfc_nodelist *ndlp;
681
682 did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
683 did = be32_to_cpu(did);
684
685 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
686 "GFF_ID cmpl: status:x%x/x%x did:x%x",
687 irsp->ulpStatus, irsp->un.ulpWord[4], did);
688
689 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
690 /* Good status, continue checking */
691 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
692 fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
693
694 if (CTrsp->CommandResponse.bits.CmdRsp ==
695 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
696 if ((fbits & FC4_FEATURE_INIT) &&
697 !(fbits & FC4_FEATURE_TARGET)) {
698 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
699 "%d (%d):0245 Skip x%x GFF "
700 "NameServer Rsp Data: (init) "
701 "x%x x%x\n", phba->brd_no,
702 vport->vpi, did, fbits,
703 vport->fc_rscn_id_cnt);
704 goto out;
705 }
706 }
707 }
708 else {
709 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
710 "%d (%d):0267 NameServer GFF Rsp"
711 " x%x Error (%d %d) Data: x%x x%x\n",
712 phba->brd_no, vport->vpi, did,
713 irsp->ulpStatus, irsp->un.ulpWord[4],
 714 vport->fc_flag, vport->fc_rscn_id_cnt);
715 }
716
717 /* This is a target port, unregistered port, or the GFF_ID failed */
718 ndlp = lpfc_setup_disc_node(vport, did);
719 if (ndlp) {
720 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
721 "%d (%d):0242 Process x%x GFF "
722 "NameServer Rsp Data: x%x x%x x%x\n",
723 phba->brd_no, vport->vpi,
724 did, ndlp->nlp_flag, vport->fc_flag,
725 vport->fc_rscn_id_cnt);
726 } else {
727 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
728 "%d (%d):0243 Skip x%x GFF "
729 "NameServer Rsp Data: x%x x%x\n",
730 phba->brd_no, vport->vpi, did,
731 vport->fc_flag, vport->fc_rscn_id_cnt);
732 }
733out:
734 /* Link up / RSCN discovery */
735 if (vport->num_disc_nodes)
736 vport->num_disc_nodes--;
737 if (vport->num_disc_nodes == 0) {
738 /*
739 * The driver has cycled through all Nports in the RSCN payload.
740 * Complete the handling by cleaning up and marking the
741 * current driver state.
742 */
743 if (vport->port_state >= LPFC_DISC_AUTH) {
744 if (vport->fc_flag & FC_RSCN_MODE) {
745 lpfc_els_flush_rscn(vport);
746 spin_lock_irq(shost->host_lock);
747 vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
748 spin_unlock_irq(shost->host_lock);
749 }
750 else
751 lpfc_els_flush_rscn(vport);
752 }
753 lpfc_disc_start(vport);
754 }
755 lpfc_ct_free_iocb(phba, cmdiocb);
756 return;
757}
758
759
505static void 760static void
506lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 761lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
507 struct lpfc_iocbq * rspiocb) 762 struct lpfc_iocbq *rspiocb)
508{ 763{
509 struct lpfc_sli *psli; 764 struct lpfc_vport *vport = cmdiocb->vport;
510 struct lpfc_dmabuf *bmp;
511 struct lpfc_dmabuf *inp; 765 struct lpfc_dmabuf *inp;
512 struct lpfc_dmabuf *outp; 766 struct lpfc_dmabuf *outp;
513 IOCB_t *irsp; 767 IOCB_t *irsp;
514 struct lpfc_sli_ct_request *CTrsp; 768 struct lpfc_sli_ct_request *CTrsp;
769 int cmdcode, rc;
770 uint8_t retry;
771 uint32_t latt;
515 772
516 psli = &phba->sli;
517 /* we pass cmdiocb to state machine which needs rspiocb as well */ 773 /* we pass cmdiocb to state machine which needs rspiocb as well */
518 cmdiocb->context_un.rsp_iocb = rspiocb; 774 cmdiocb->context_un.rsp_iocb = rspiocb;
519 775
520 inp = (struct lpfc_dmabuf *) cmdiocb->context1; 776 inp = (struct lpfc_dmabuf *) cmdiocb->context1;
521 outp = (struct lpfc_dmabuf *) cmdiocb->context2; 777 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
522 bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
523 irsp = &rspiocb->iocb; 778 irsp = &rspiocb->iocb;
524 779
780 cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
781 CommandResponse.bits.CmdRsp);
525 CTrsp = (struct lpfc_sli_ct_request *) outp->virt; 782 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
526 783
784 latt = lpfc_els_chk_latt(vport);
785
527 /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */ 786 /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
528 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 787 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
529 "%d:0209 RFT request completes ulpStatus x%x " 788 "%d (%d):0209 RFT request completes, latt %d, "
530 "CmdRsp x%x\n", phba->brd_no, irsp->ulpStatus, 789 "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
531 CTrsp->CommandResponse.bits.CmdRsp); 790 phba->brd_no, vport->vpi, latt, irsp->ulpStatus,
791 CTrsp->CommandResponse.bits.CmdRsp,
792 cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
532 793
533 lpfc_free_ct_rsp(phba, outp); 794 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
534 lpfc_mbuf_free(phba, inp->virt, inp->phys); 795 "CT cmd cmpl: status:x%x/x%x cmd:x%x",
535 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 796 irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
536 kfree(inp); 797
537 kfree(bmp); 798 if (irsp->ulpStatus) {
538 spin_lock_irq(phba->host->host_lock); 799 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
539 lpfc_sli_release_iocbq(phba, cmdiocb); 800 "%d (%d):0268 NS cmd %x Error (%d %d)\n",
540 spin_unlock_irq(phba->host->host_lock); 801 phba->brd_no, vport->vpi, cmdcode,
802 irsp->ulpStatus, irsp->un.ulpWord[4]);
803
804 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
805 ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
806 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
807 goto out;
808
809 retry = cmdiocb->retry;
810 if (retry >= LPFC_MAX_NS_RETRY)
811 goto out;
812
813 retry++;
814 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
815 "%d (%d):0216 Retrying NS cmd %x\n",
816 phba->brd_no, vport->vpi, cmdcode);
817 rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
818 if (rc == 0)
819 goto out;
820 }
821
822out:
823 lpfc_ct_free_iocb(phba, cmdiocb);
541 return; 824 return;
542} 825}
543 826
544static void 827static void
545lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 828lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
546 struct lpfc_iocbq * rspiocb) 829 struct lpfc_iocbq *rspiocb)
547{ 830{
548 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 831 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
549 return; 832 return;
550} 833}
551 834
552static void 835static void
553lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 836lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
554 struct lpfc_iocbq * rspiocb) 837 struct lpfc_iocbq *rspiocb)
555{ 838{
556 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 839 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
557 return; 840 return;
558} 841}
559 842
560static void 843static void
561lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 844lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
562 struct lpfc_iocbq * rspiocb) 845 struct lpfc_iocbq *rspiocb)
563{ 846{
564 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 847 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
565 return; 848 return;
566} 849}
567 850
568void 851static void
569lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) 852lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
853 struct lpfc_iocbq *rspiocb)
570{ 854{
571 char fwrev[16]; 855 IOCB_t *irsp = &rspiocb->iocb;
856 struct lpfc_vport *vport = cmdiocb->vport;
572 857
573 lpfc_decode_firmware_rev(phba, fwrev, 0); 858 if (irsp->ulpStatus != IOSTAT_SUCCESS)
859 vport->fc_flag |= FC_RFF_NOT_SUPPORTED;
574 860
575 sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, 861 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
576 fwrev, lpfc_release_version);
577 return; 862 return;
578} 863}
579 864
865int
866lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
867 size_t size)
868{
869 int n;
870 uint8_t *wwn = vport->phba->wwpn;
871
872 n = snprintf(symbol, size,
873 "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
874 wwn[0], wwn[1], wwn[2], wwn[3],
875 wwn[4], wwn[5], wwn[6], wwn[7]);
876
877 if (vport->port_type == LPFC_PHYSICAL_PORT)
878 return n;
879
880 if (n < size)
881 n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
882
883 if (n < size && vport->vname)
884 n += snprintf(symbol + n, size - n, " VName-%s", vport->vname);
885 return n;
886}
887
888int
889lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
890 size_t size)
891{
892 char fwrev[16];
893 int n;
894
895 lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
896
897 n = snprintf(symbol, size, "Emulex %s FV%s DV%s",
898 vport->phba->ModelName, fwrev, lpfc_release_version);
899 return n;
900}
901
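For orientation, a sketch of the strings the two helpers above produce; the WWPN, model name, firmware and driver versions shown are invented for illustration and are not taken from this patch:

	lpfc_vport_symbolic_port_name():  "Emulex PPN-10:00:00:00:c9:3c:5a:7e VPort-1 VName-vport1"
	lpfc_vport_symbolic_node_name():  "Emulex LP11002 FV2.72A2 DV8.2.0.1"

The VPort/VName suffixes are appended only for NPIV vports (a physical port returns after the PPN part), which is what lets the name server tell apart vports that share one physical adapter's WWPN.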
580/* 902/*
581 * lpfc_ns_cmd 903 * lpfc_ns_cmd
582 * Description: 904 * Description:
@@ -585,55 +907,76 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
585 * LI_CTNS_RFT_ID 907 * LI_CTNS_RFT_ID
586 */ 908 */
587int 909int
588lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) 910lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
911 uint8_t retry, uint32_t context)
589{ 912{
913 struct lpfc_nodelist * ndlp;
914 struct lpfc_hba *phba = vport->phba;
590 struct lpfc_dmabuf *mp, *bmp; 915 struct lpfc_dmabuf *mp, *bmp;
591 struct lpfc_sli_ct_request *CtReq; 916 struct lpfc_sli_ct_request *CtReq;
592 struct ulp_bde64 *bpl; 917 struct ulp_bde64 *bpl;
593 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 918 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
594 struct lpfc_iocbq *) = NULL; 919 struct lpfc_iocbq *) = NULL;
595 uint32_t rsp_size = 1024; 920 uint32_t rsp_size = 1024;
921 size_t size;
922 int rc = 0;
923
924 ndlp = lpfc_findnode_did(vport, NameServer_DID);
925 if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
926 rc=1;
927 goto ns_cmd_exit;
928 }
596 929
597 /* fill in BDEs for command */ 930 /* fill in BDEs for command */
598 /* Allocate buffer for command payload */ 931 /* Allocate buffer for command payload */
599 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 932 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
600 if (!mp) 933 if (!mp) {
934 rc=2;
601 goto ns_cmd_exit; 935 goto ns_cmd_exit;
936 }
602 937
603 INIT_LIST_HEAD(&mp->list); 938 INIT_LIST_HEAD(&mp->list);
604 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); 939 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
605 if (!mp->virt) 940 if (!mp->virt) {
941 rc=3;
606 goto ns_cmd_free_mp; 942 goto ns_cmd_free_mp;
943 }
607 944
608 /* Allocate buffer for Buffer ptr list */ 945 /* Allocate buffer for Buffer ptr list */
609 bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 946 bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
610 if (!bmp) 947 if (!bmp) {
948 rc=4;
611 goto ns_cmd_free_mpvirt; 949 goto ns_cmd_free_mpvirt;
950 }
612 951
613 INIT_LIST_HEAD(&bmp->list); 952 INIT_LIST_HEAD(&bmp->list);
614 bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys)); 953 bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
615 if (!bmp->virt) 954 if (!bmp->virt) {
955 rc=5;
616 goto ns_cmd_free_bmp; 956 goto ns_cmd_free_bmp;
957 }
617 958
618 /* NameServer Req */ 959 /* NameServer Req */
 619 lpfc_printf_log(phba, 960 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
620 KERN_INFO, 961 "%d (%d):0236 NameServer Req Data: x%x x%x x%x\n",
621 LOG_DISCOVERY, 962 phba->brd_no, vport->vpi, cmdcode, vport->fc_flag,
622 "%d:0236 NameServer Req Data: x%x x%x x%x\n", 963 vport->fc_rscn_id_cnt);
623 phba->brd_no, cmdcode, phba->fc_flag,
624 phba->fc_rscn_id_cnt);
625 964
626 bpl = (struct ulp_bde64 *) bmp->virt; 965 bpl = (struct ulp_bde64 *) bmp->virt;
627 memset(bpl, 0, sizeof(struct ulp_bde64)); 966 memset(bpl, 0, sizeof(struct ulp_bde64));
628 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); 967 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
629 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); 968 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
630 bpl->tus.f.bdeFlags = 0; 969 bpl->tus.f.bdeFlags = 0;
631 if (cmdcode == SLI_CTNS_GID_FT) 970 if (cmdcode == SLI_CTNS_GID_FT)
632 bpl->tus.f.bdeSize = GID_REQUEST_SZ; 971 bpl->tus.f.bdeSize = GID_REQUEST_SZ;
972 else if (cmdcode == SLI_CTNS_GFF_ID)
973 bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
633 else if (cmdcode == SLI_CTNS_RFT_ID) 974 else if (cmdcode == SLI_CTNS_RFT_ID)
634 bpl->tus.f.bdeSize = RFT_REQUEST_SZ; 975 bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
635 else if (cmdcode == SLI_CTNS_RNN_ID) 976 else if (cmdcode == SLI_CTNS_RNN_ID)
636 bpl->tus.f.bdeSize = RNN_REQUEST_SZ; 977 bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
978 else if (cmdcode == SLI_CTNS_RSPN_ID)
979 bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
637 else if (cmdcode == SLI_CTNS_RSNN_NN) 980 else if (cmdcode == SLI_CTNS_RSNN_NN)
638 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; 981 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
639 else if (cmdcode == SLI_CTNS_RFF_ID) 982 else if (cmdcode == SLI_CTNS_RFF_ID)
@@ -654,56 +997,78 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
654 CtReq->CommandResponse.bits.CmdRsp = 997 CtReq->CommandResponse.bits.CmdRsp =
655 be16_to_cpu(SLI_CTNS_GID_FT); 998 be16_to_cpu(SLI_CTNS_GID_FT);
656 CtReq->un.gid.Fc4Type = SLI_CTPT_FCP; 999 CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
657 if (phba->hba_state < LPFC_HBA_READY) 1000 if (vport->port_state < LPFC_NS_QRY)
658 phba->hba_state = LPFC_NS_QRY; 1001 vport->port_state = LPFC_NS_QRY;
659 lpfc_set_disctmo(phba); 1002 lpfc_set_disctmo(vport);
660 cmpl = lpfc_cmpl_ct_cmd_gid_ft; 1003 cmpl = lpfc_cmpl_ct_cmd_gid_ft;
661 rsp_size = FC_MAX_NS_RSP; 1004 rsp_size = FC_MAX_NS_RSP;
662 break; 1005 break;
663 1006
1007 case SLI_CTNS_GFF_ID:
1008 CtReq->CommandResponse.bits.CmdRsp =
1009 be16_to_cpu(SLI_CTNS_GFF_ID);
1010 CtReq->un.gff.PortId = be32_to_cpu(context);
1011 cmpl = lpfc_cmpl_ct_cmd_gff_id;
1012 break;
1013
664 case SLI_CTNS_RFT_ID: 1014 case SLI_CTNS_RFT_ID:
665 CtReq->CommandResponse.bits.CmdRsp = 1015 CtReq->CommandResponse.bits.CmdRsp =
666 be16_to_cpu(SLI_CTNS_RFT_ID); 1016 be16_to_cpu(SLI_CTNS_RFT_ID);
667 CtReq->un.rft.PortId = be32_to_cpu(phba->fc_myDID); 1017 CtReq->un.rft.PortId = be32_to_cpu(vport->fc_myDID);
668 CtReq->un.rft.fcpReg = 1; 1018 CtReq->un.rft.fcpReg = 1;
669 cmpl = lpfc_cmpl_ct_cmd_rft_id; 1019 cmpl = lpfc_cmpl_ct_cmd_rft_id;
670 break; 1020 break;
671 1021
672 case SLI_CTNS_RFF_ID:
673 CtReq->CommandResponse.bits.CmdRsp =
674 be16_to_cpu(SLI_CTNS_RFF_ID);
675 CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID);
676 CtReq->un.rff.feature_res = 0;
677 CtReq->un.rff.feature_tgt = 0;
678 CtReq->un.rff.type_code = FC_FCP_DATA;
679 CtReq->un.rff.feature_init = 1;
680 cmpl = lpfc_cmpl_ct_cmd_rff_id;
681 break;
682
683 case SLI_CTNS_RNN_ID: 1022 case SLI_CTNS_RNN_ID:
684 CtReq->CommandResponse.bits.CmdRsp = 1023 CtReq->CommandResponse.bits.CmdRsp =
685 be16_to_cpu(SLI_CTNS_RNN_ID); 1024 be16_to_cpu(SLI_CTNS_RNN_ID);
686 CtReq->un.rnn.PortId = be32_to_cpu(phba->fc_myDID); 1025 CtReq->un.rnn.PortId = be32_to_cpu(vport->fc_myDID);
687 memcpy(CtReq->un.rnn.wwnn, &phba->fc_nodename, 1026 memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename,
688 sizeof (struct lpfc_name)); 1027 sizeof (struct lpfc_name));
689 cmpl = lpfc_cmpl_ct_cmd_rnn_id; 1028 cmpl = lpfc_cmpl_ct_cmd_rnn_id;
690 break; 1029 break;
691 1030
1031 case SLI_CTNS_RSPN_ID:
1032 CtReq->CommandResponse.bits.CmdRsp =
1033 be16_to_cpu(SLI_CTNS_RSPN_ID);
1034 CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID);
1035 size = sizeof(CtReq->un.rspn.symbname);
1036 CtReq->un.rspn.len =
1037 lpfc_vport_symbolic_port_name(vport,
1038 CtReq->un.rspn.symbname, size);
1039 cmpl = lpfc_cmpl_ct_cmd_rspn_id;
1040 break;
692 case SLI_CTNS_RSNN_NN: 1041 case SLI_CTNS_RSNN_NN:
693 CtReq->CommandResponse.bits.CmdRsp = 1042 CtReq->CommandResponse.bits.CmdRsp =
694 be16_to_cpu(SLI_CTNS_RSNN_NN); 1043 be16_to_cpu(SLI_CTNS_RSNN_NN);
695 memcpy(CtReq->un.rsnn.wwnn, &phba->fc_nodename, 1044 memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
696 sizeof (struct lpfc_name)); 1045 sizeof (struct lpfc_name));
697 lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname); 1046 size = sizeof(CtReq->un.rsnn.symbname);
698 CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname); 1047 CtReq->un.rsnn.len =
1048 lpfc_vport_symbolic_node_name(vport,
1049 CtReq->un.rsnn.symbname, size);
699 cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; 1050 cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
700 break; 1051 break;
1052 case SLI_CTNS_RFF_ID:
1053 vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED;
1054 CtReq->CommandResponse.bits.CmdRsp =
1055 be16_to_cpu(SLI_CTNS_RFF_ID);
 1056 CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);
1057 CtReq->un.rff.fbits = FC4_FEATURE_INIT;
1058 CtReq->un.rff.type_code = FC_FCP_DATA;
1059 cmpl = lpfc_cmpl_ct_cmd_rff_id;
1060 break;
701 } 1061 }
702 1062
703 if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, rsp_size)) 1063 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
704 /* On success, The cmpl function will free the buffers */ 1064 /* On success, The cmpl function will free the buffers */
1065 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1066 "Issue CT cmd: cmd:x%x did:x%x",
1067 cmdcode, ndlp->nlp_DID, 0);
705 return 0; 1068 return 0;
1069 }
706 1070
1071 rc=6;
707 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1072 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
708ns_cmd_free_bmp: 1073ns_cmd_free_bmp:
709 kfree(bmp); 1074 kfree(bmp);
@@ -712,14 +1077,17 @@ ns_cmd_free_mpvirt:
712ns_cmd_free_mp: 1077ns_cmd_free_mp:
713 kfree(mp); 1078 kfree(mp);
714ns_cmd_exit: 1079ns_cmd_exit:
1080 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
1081 "%d (%d):0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
1082 phba->brd_no, vport->vpi, cmdcode, rc, vport->fc_flag,
1083 vport->fc_rscn_id_cnt);
715 return 1; 1084 return 1;
716} 1085}
717 1086
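For orientation, the call patterns already visible earlier in this patch: the GID_FT retry issued from the GID_FT completion handler, and the per-DID GFF_ID query issued while parsing a GID_FT response. This is a sketch of those existing call sites, not an additional one introduced here:

	rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, vport->fc_ns_retry, 0);

	if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, 0, Did) == 0)
		vport->num_disc_nodes++;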
718static void 1087static void
719lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba, 1088lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
720 struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb) 1089 struct lpfc_iocbq * rspiocb)
721{ 1090{
722 struct lpfc_dmabuf *bmp = cmdiocb->context3;
723 struct lpfc_dmabuf *inp = cmdiocb->context1; 1091 struct lpfc_dmabuf *inp = cmdiocb->context1;
724 struct lpfc_dmabuf *outp = cmdiocb->context2; 1092 struct lpfc_dmabuf *outp = cmdiocb->context2;
725 struct lpfc_sli_ct_request *CTrsp = outp->virt; 1093 struct lpfc_sli_ct_request *CTrsp = outp->virt;
@@ -727,48 +1095,60 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
727 struct lpfc_nodelist *ndlp; 1095 struct lpfc_nodelist *ndlp;
728 uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; 1096 uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
729 uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; 1097 uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
1098 struct lpfc_vport *vport = cmdiocb->vport;
1099 IOCB_t *irsp = &rspiocb->iocb;
1100 uint32_t latt;
1101
1102 latt = lpfc_els_chk_latt(vport);
1103
1104 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1105 "FDMI cmpl: status:x%x/x%x latt:%d",
1106 irsp->ulpStatus, irsp->un.ulpWord[4], latt);
1107
1108 if (latt || irsp->ulpStatus) {
1109 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1110 "%d (%d):0229 FDMI cmd %04x failed, latt = %d "
1111 "ulpStatus: x%x, rid x%x\n",
1112 phba->brd_no, vport->vpi,
1113 be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
1114 irsp->un.ulpWord[4]);
1115 lpfc_ct_free_iocb(phba, cmdiocb);
1116 return;
1117 }
730 1118
731 ndlp = lpfc_findnode_did(phba, FDMI_DID); 1119 ndlp = lpfc_findnode_did(vport, FDMI_DID);
732 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { 1120 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
733 /* FDMI rsp failed */ 1121 /* FDMI rsp failed */
734 lpfc_printf_log(phba, 1122 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
735 KERN_INFO, 1123 "%d (%d):0220 FDMI rsp failed Data: x%x\n",
736 LOG_DISCOVERY, 1124 phba->brd_no, vport->vpi,
737 "%d:0220 FDMI rsp failed Data: x%x\n", 1125 be16_to_cpu(fdmi_cmd));
738 phba->brd_no,
739 be16_to_cpu(fdmi_cmd));
740 } 1126 }
741 1127
742 switch (be16_to_cpu(fdmi_cmd)) { 1128 switch (be16_to_cpu(fdmi_cmd)) {
743 case SLI_MGMT_RHBA: 1129 case SLI_MGMT_RHBA:
744 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RPA); 1130 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA);
745 break; 1131 break;
746 1132
747 case SLI_MGMT_RPA: 1133 case SLI_MGMT_RPA:
748 break; 1134 break;
749 1135
750 case SLI_MGMT_DHBA: 1136 case SLI_MGMT_DHBA:
751 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DPRT); 1137 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT);
752 break; 1138 break;
753 1139
754 case SLI_MGMT_DPRT: 1140 case SLI_MGMT_DPRT:
755 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RHBA); 1141 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA);
756 break; 1142 break;
757 } 1143 }
758 1144 lpfc_ct_free_iocb(phba, cmdiocb);
759 lpfc_free_ct_rsp(phba, outp);
760 lpfc_mbuf_free(phba, inp->virt, inp->phys);
761 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
762 kfree(inp);
763 kfree(bmp);
764 spin_lock_irq(phba->host->host_lock);
765 lpfc_sli_release_iocbq(phba, cmdiocb);
766 spin_unlock_irq(phba->host->host_lock);
767 return; 1145 return;
768} 1146}
1147
769int 1148int
770lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) 1149lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
771{ 1150{
1151 struct lpfc_hba *phba = vport->phba;
772 struct lpfc_dmabuf *mp, *bmp; 1152 struct lpfc_dmabuf *mp, *bmp;
773 struct lpfc_sli_ct_request *CtReq; 1153 struct lpfc_sli_ct_request *CtReq;
774 struct ulp_bde64 *bpl; 1154 struct ulp_bde64 *bpl;
@@ -805,12 +1185,10 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
805 INIT_LIST_HEAD(&bmp->list); 1185 INIT_LIST_HEAD(&bmp->list);
806 1186
807 /* FDMI request */ 1187 /* FDMI request */
808 lpfc_printf_log(phba, 1188 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
809 KERN_INFO, 1189 "%d (%d):0218 FDMI Request Data: x%x x%x x%x\n",
810 LOG_DISCOVERY, 1190 phba->brd_no, vport->vpi, vport->fc_flag,
811 "%d:0218 FDMI Request Data: x%x x%x x%x\n", 1191 vport->port_state, cmdcode);
812 phba->brd_no,
813 phba->fc_flag, phba->hba_state, cmdcode);
814 1192
815 CtReq = (struct lpfc_sli_ct_request *) mp->virt; 1193 CtReq = (struct lpfc_sli_ct_request *) mp->virt;
816 1194
@@ -833,11 +1211,11 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
833 be16_to_cpu(SLI_MGMT_RHBA); 1211 be16_to_cpu(SLI_MGMT_RHBA);
834 CtReq->CommandResponse.bits.Size = 0; 1212 CtReq->CommandResponse.bits.Size = 0;
835 rh = (REG_HBA *) & CtReq->un.PortID; 1213 rh = (REG_HBA *) & CtReq->un.PortID;
836 memcpy(&rh->hi.PortName, &phba->fc_sparam.portName, 1214 memcpy(&rh->hi.PortName, &vport->fc_sparam.portName,
837 sizeof (struct lpfc_name)); 1215 sizeof (struct lpfc_name));
838 /* One entry (port) per adapter */ 1216 /* One entry (port) per adapter */
839 rh->rpl.EntryCnt = be32_to_cpu(1); 1217 rh->rpl.EntryCnt = be32_to_cpu(1);
840 memcpy(&rh->rpl.pe, &phba->fc_sparam.portName, 1218 memcpy(&rh->rpl.pe, &vport->fc_sparam.portName,
841 sizeof (struct lpfc_name)); 1219 sizeof (struct lpfc_name));
842 1220
843 /* point to the HBA attribute block */ 1221 /* point to the HBA attribute block */
@@ -853,7 +1231,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
853 ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME); 1231 ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
854 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES 1232 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES
855 + sizeof (struct lpfc_name)); 1233 + sizeof (struct lpfc_name));
856 memcpy(&ae->un.NodeName, &phba->fc_sparam.nodeName, 1234 memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
857 sizeof (struct lpfc_name)); 1235 sizeof (struct lpfc_name));
858 ab->EntryCnt++; 1236 ab->EntryCnt++;
859 size += FOURBYTES + sizeof (struct lpfc_name); 1237 size += FOURBYTES + sizeof (struct lpfc_name);
@@ -991,7 +1369,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
991 pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID; 1369 pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
992 size = sizeof (struct lpfc_name) + FOURBYTES; 1370 size = sizeof (struct lpfc_name) + FOURBYTES;
993 memcpy((uint8_t *) & pab->PortName, 1371 memcpy((uint8_t *) & pab->PortName,
994 (uint8_t *) & phba->fc_sparam.portName, 1372 (uint8_t *) & vport->fc_sparam.portName,
995 sizeof (struct lpfc_name)); 1373 sizeof (struct lpfc_name));
996 pab->ab.EntryCnt = 0; 1374 pab->ab.EntryCnt = 0;
997 1375
@@ -1053,7 +1431,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
1053 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size); 1431 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
1054 ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE); 1432 ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
1055 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); 1433 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
1056 hsp = (struct serv_parm *) & phba->fc_sparam; 1434 hsp = (struct serv_parm *) & vport->fc_sparam;
1057 ae->un.MaxFrameSize = 1435 ae->un.MaxFrameSize =
1058 (((uint32_t) hsp->cmn. 1436 (((uint32_t) hsp->cmn.
1059 bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn. 1437 bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
@@ -1097,7 +1475,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
1097 CtReq->CommandResponse.bits.Size = 0; 1475 CtReq->CommandResponse.bits.Size = 0;
1098 pe = (PORT_ENTRY *) & CtReq->un.PortID; 1476 pe = (PORT_ENTRY *) & CtReq->un.PortID;
1099 memcpy((uint8_t *) & pe->PortName, 1477 memcpy((uint8_t *) & pe->PortName,
1100 (uint8_t *) & phba->fc_sparam.portName, 1478 (uint8_t *) & vport->fc_sparam.portName,
1101 sizeof (struct lpfc_name)); 1479 sizeof (struct lpfc_name));
1102 size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name); 1480 size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
1103 break; 1481 break;
@@ -1107,22 +1485,22 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
1107 CtReq->CommandResponse.bits.Size = 0; 1485 CtReq->CommandResponse.bits.Size = 0;
1108 pe = (PORT_ENTRY *) & CtReq->un.PortID; 1486 pe = (PORT_ENTRY *) & CtReq->un.PortID;
1109 memcpy((uint8_t *) & pe->PortName, 1487 memcpy((uint8_t *) & pe->PortName,
1110 (uint8_t *) & phba->fc_sparam.portName, 1488 (uint8_t *) & vport->fc_sparam.portName,
1111 sizeof (struct lpfc_name)); 1489 sizeof (struct lpfc_name));
1112 size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name); 1490 size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
1113 break; 1491 break;
1114 } 1492 }
1115 1493
1116 bpl = (struct ulp_bde64 *) bmp->virt; 1494 bpl = (struct ulp_bde64 *) bmp->virt;
1117 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); 1495 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
1118 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); 1496 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
1119 bpl->tus.f.bdeFlags = 0; 1497 bpl->tus.f.bdeFlags = 0;
1120 bpl->tus.f.bdeSize = size; 1498 bpl->tus.f.bdeSize = size;
1121 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1499 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1122 1500
1123 cmpl = lpfc_cmpl_ct_cmd_fdmi; 1501 cmpl = lpfc_cmpl_ct_cmd_fdmi;
1124 1502
1125 if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP)) 1503 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
1126 return 0; 1504 return 0;
1127 1505
1128 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1506 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
@@ -1134,49 +1512,50 @@ fdmi_cmd_free_mp:
1134 kfree(mp); 1512 kfree(mp);
1135fdmi_cmd_exit: 1513fdmi_cmd_exit:
1136 /* Issue FDMI request failed */ 1514 /* Issue FDMI request failed */
1137 lpfc_printf_log(phba, 1515 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1138 KERN_INFO, 1516 "%d (%d):0244 Issue FDMI request failed Data: x%x\n",
1139 LOG_DISCOVERY, 1517 phba->brd_no, vport->vpi, cmdcode);
1140 "%d:0244 Issue FDMI request failed Data: x%x\n",
1141 phba->brd_no,
1142 cmdcode);
1143 return 1; 1518 return 1;
1144} 1519}
1145 1520
1146void 1521void
1147lpfc_fdmi_tmo(unsigned long ptr) 1522lpfc_fdmi_tmo(unsigned long ptr)
1148{ 1523{
1149 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 1524 struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
1525 struct lpfc_hba *phba = vport->phba;
1150 unsigned long iflag; 1526 unsigned long iflag;
1151 1527
1152 spin_lock_irqsave(phba->host->host_lock, iflag); 1528 spin_lock_irqsave(&vport->work_port_lock, iflag);
1153 if (!(phba->work_hba_events & WORKER_FDMI_TMO)) { 1529 if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
1154 phba->work_hba_events |= WORKER_FDMI_TMO; 1530 vport->work_port_events |= WORKER_FDMI_TMO;
1531 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
1532
1533 spin_lock_irqsave(&phba->hbalock, iflag);
1155 if (phba->work_wait) 1534 if (phba->work_wait)
1156 wake_up(phba->work_wait); 1535 lpfc_worker_wake_up(phba);
1536 spin_unlock_irqrestore(&phba->hbalock, iflag);
1157 } 1537 }
1158 spin_unlock_irqrestore(phba->host->host_lock,iflag); 1538 else
1539 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
1159} 1540}
1160 1541
1161void 1542void
1162lpfc_fdmi_tmo_handler(struct lpfc_hba *phba) 1543lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
1163{ 1544{
1164 struct lpfc_nodelist *ndlp; 1545 struct lpfc_nodelist *ndlp;
1165 1546
1166 ndlp = lpfc_findnode_did(phba, FDMI_DID); 1547 ndlp = lpfc_findnode_did(vport, FDMI_DID);
1167 if (ndlp) { 1548 if (ndlp) {
1168 if (init_utsname()->nodename[0] != '\0') { 1549 if (init_utsname()->nodename[0] != '\0')
1169 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA); 1550 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
1170 } else { 1551 else
1171 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60); 1552 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
1172 }
1173 } 1553 }
1174 return; 1554 return;
1175} 1555}
1176 1556
1177
1178void 1557void
1179lpfc_decode_firmware_rev(struct lpfc_hba * phba, char *fwrevision, int flag) 1558lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1180{ 1559{
1181 struct lpfc_sli *psli = &phba->sli; 1560 struct lpfc_sli *psli = &phba->sli;
1182 lpfc_vpd_t *vp = &phba->vpd; 1561 lpfc_vpd_t *vp = &phba->vpd;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
new file mode 100644
index 000000000000..673cfe11cc2b
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -0,0 +1,508 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#include <linux/blkdev.h>
22#include <linux/delay.h>
23#include <linux/dma-mapping.h>
24#include <linux/idr.h>
25#include <linux/interrupt.h>
26#include <linux/kthread.h>
27#include <linux/pci.h>
28#include <linux/spinlock.h>
29#include <linux/ctype.h>
30#include <linux/version.h>
31
32#include <scsi/scsi.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36
37#include "lpfc_hw.h"
38#include "lpfc_sli.h"
39#include "lpfc_disc.h"
40#include "lpfc_scsi.h"
41#include "lpfc.h"
42#include "lpfc_logmsg.h"
43#include "lpfc_crtn.h"
44#include "lpfc_vport.h"
45#include "lpfc_version.h"
46#include "lpfc_vport.h"
47#include "lpfc_debugfs.h"
48
49#ifdef CONFIG_LPFC_DEBUG_FS
50/* debugfs interface
51 *
52 * To access this interface the user should:
53 * # mkdir /debug
54 * # mount -t debugfs none /debug
55 *
56 * The lpfc debugfs directory hierarchy is:
57 * lpfc/lpfcX/vportY
58 * where X is the lpfc hba unique_id
59 * where Y is the vport VPI on that hba
60 *
61 * Debugging services available per vport:
62 * discovery_trace
63 * This is an ASCII readable file that contains a trace of the last
64 * lpfc_debugfs_max_disc_trc events that happened on a specific vport.
65 * See lpfc_debugfs.h for different categories of
66 * discovery events. To enable the discovery trace, the following
67 * module parameters must be set:
68 * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
69 * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for
70 * EACH vport. X MUST also be a power of 2.
71 * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
72 * lpfc_debugfs.h.
73 */
74static int lpfc_debugfs_enable = 0;
75module_param(lpfc_debugfs_enable, int, 0);
76MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
77
78static int lpfc_debugfs_max_disc_trc = 0; /* This MUST be a power of 2 */
79module_param(lpfc_debugfs_max_disc_trc, int, 0);
80MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
81 "Set debugfs discovery trace depth");
82
83static int lpfc_debugfs_mask_disc_trc = 0;
84module_param(lpfc_debugfs_mask_disc_trc, int, 0);
85MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
86 "Set debugfs discovery trace mask");
87
88#include <linux/debugfs.h>
89
90/* size of discovery_trace output line */
91#define LPFC_DISC_TRC_ENTRY_SIZE 80
92
93/* nodelist output buffer size */
94#define LPFC_NODELIST_SIZE 8192
95#define LPFC_NODELIST_ENTRY_SIZE 120
96
97struct lpfc_debug {
98 char *buffer;
99 int len;
100};
101
102atomic_t lpfc_debugfs_disc_trc_cnt = ATOMIC_INIT(0);
103unsigned long lpfc_debugfs_start_time = 0L;
104
105static int
106lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
107{
108 int i, index, len, enable;
109 uint32_t ms;
110 struct lpfc_disc_trc *dtp;
111 char buffer[80];
112
113
114 enable = lpfc_debugfs_enable;
115 lpfc_debugfs_enable = 0;
116
117 len = 0;
118 index = (atomic_read(&vport->disc_trc_cnt) + 1) &
119 (lpfc_debugfs_max_disc_trc - 1);
120 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
121 dtp = vport->disc_trc + i;
122 if (!dtp->fmt)
123 continue;
124 ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
125 snprintf(buffer, 80, "%010d:%010d ms:%s\n",
126 dtp->seq_cnt, ms, dtp->fmt);
127 len += snprintf(buf+len, size-len, buffer,
128 dtp->data1, dtp->data2, dtp->data3);
129 }
130 for (i = 0; i < index; i++) {
131 dtp = vport->disc_trc + i;
132 if (!dtp->fmt)
133 continue;
134 ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
135 snprintf(buffer, 80, "%010d:%010d ms:%s\n",
136 dtp->seq_cnt, ms, dtp->fmt);
137 len += snprintf(buf+len, size-len, buffer,
138 dtp->data1, dtp->data2, dtp->data3);
139 }
140
141 lpfc_debugfs_enable = enable;
142 return len;
143}
144
145static int
146lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
147{
148 int len = 0;
149 int cnt;
150 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
151 struct lpfc_nodelist *ndlp;
152 unsigned char *statep, *name;
153
154 cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
155
156 spin_lock_irq(shost->host_lock);
157 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
158 if (!cnt) {
159 len += snprintf(buf+len, size-len,
160 "Missing Nodelist Entries\n");
161 break;
162 }
163 cnt--;
164 switch (ndlp->nlp_state) {
165 case NLP_STE_UNUSED_NODE:
166 statep = "UNUSED";
167 break;
168 case NLP_STE_PLOGI_ISSUE:
169 statep = "PLOGI ";
170 break;
171 case NLP_STE_ADISC_ISSUE:
172 statep = "ADISC ";
173 break;
174 case NLP_STE_REG_LOGIN_ISSUE:
175 statep = "REGLOG";
176 break;
177 case NLP_STE_PRLI_ISSUE:
178 statep = "PRLI ";
179 break;
180 case NLP_STE_UNMAPPED_NODE:
181 statep = "UNMAP ";
182 break;
183 case NLP_STE_MAPPED_NODE:
184 statep = "MAPPED";
185 break;
186 case NLP_STE_NPR_NODE:
187 statep = "NPR ";
188 break;
189 default:
190 statep = "UNKNOWN";
191 }
192 len += snprintf(buf+len, size-len, "%s DID:x%06x ",
193 statep, ndlp->nlp_DID);
194 name = (unsigned char *)&ndlp->nlp_portname;
195 len += snprintf(buf+len, size-len,
196 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
197 *name, *(name+1), *(name+2), *(name+3),
198 *(name+4), *(name+5), *(name+6), *(name+7));
199 name = (unsigned char *)&ndlp->nlp_nodename;
200 len += snprintf(buf+len, size-len,
201 "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
202 *name, *(name+1), *(name+2), *(name+3),
203 *(name+4), *(name+5), *(name+6), *(name+7));
204 len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ",
205 ndlp->nlp_rpi, ndlp->nlp_flag);
206 if (!ndlp->nlp_type)
207 len += snprintf(buf+len, size-len, "UNKNOWN_TYPE");
208 if (ndlp->nlp_type & NLP_FC_NODE)
209 len += snprintf(buf+len, size-len, "FC_NODE ");
210 if (ndlp->nlp_type & NLP_FABRIC)
211 len += snprintf(buf+len, size-len, "FABRIC ");
212 if (ndlp->nlp_type & NLP_FCP_TARGET)
213 len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
214 ndlp->nlp_sid);
215 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
216 len += snprintf(buf+len, size-len, "FCP_INITIATOR");
217 len += snprintf(buf+len, size-len, "\n");
218 }
219 spin_unlock_irq(shost->host_lock);
220 return len;
221}
222#endif
223
224
225inline void
226lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
227 uint32_t data1, uint32_t data2, uint32_t data3)
228{
229#ifdef CONFIG_LPFC_DEBUG_FS
230 struct lpfc_disc_trc *dtp;
231 int index;
232
233 if (!(lpfc_debugfs_mask_disc_trc & mask))
234 return;
235
236 if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc ||
237 !vport || !vport->disc_trc)
238 return;
239
240 index = atomic_inc_return(&vport->disc_trc_cnt) &
241 (lpfc_debugfs_max_disc_trc - 1);
242 dtp = vport->disc_trc + index;
243 dtp->fmt = fmt;
244 dtp->data1 = data1;
245 dtp->data2 = data2;
246 dtp->data3 = data3;
247 dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_disc_trc_cnt);
248 dtp->jif = jiffies;
249#endif
250 return;
251}
252
253#ifdef CONFIG_LPFC_DEBUG_FS
254static int
255lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
256{
257 struct lpfc_vport *vport = inode->i_private;
258 struct lpfc_debug *debug;
259 int size;
260 int rc = -ENOMEM;
261
262 if (!lpfc_debugfs_max_disc_trc) {
263 rc = -ENOSPC;
264 goto out;
265 }
266
267 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
268 if (!debug)
269 goto out;
270
271	/* Round to page boundary */
272 size = (lpfc_debugfs_max_disc_trc * LPFC_DISC_TRC_ENTRY_SIZE);
273 size = PAGE_ALIGN(size);
274
275 debug->buffer = kmalloc(size, GFP_KERNEL);
276 if (!debug->buffer) {
277 kfree(debug);
278 goto out;
279 }
280
281 debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size);
282 file->private_data = debug;
283
284 rc = 0;
285out:
286 return rc;
287}
288
289static int
290lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
291{
292 struct lpfc_vport *vport = inode->i_private;
293 struct lpfc_debug *debug;
294 int rc = -ENOMEM;
295
296 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
297 if (!debug)
298 goto out;
299
300	/* Allocate a fixed-size buffer for the nodelist output */
301 debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
302 if (!debug->buffer) {
303 kfree(debug);
304 goto out;
305 }
306
307 debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer,
308 LPFC_NODELIST_SIZE);
309 file->private_data = debug;
310
311 rc = 0;
312out:
313 return rc;
314}
315
316static loff_t
317lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
318{
319 struct lpfc_debug *debug;
320 loff_t pos = -1;
321
322 debug = file->private_data;
323
324 switch (whence) {
325 case 0:
326 pos = off;
327 break;
328 case 1:
329 pos = file->f_pos + off;
330 break;
331 case 2:
332 pos = debug->len - off;
333 }
334 return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
335}
336
337static ssize_t
338lpfc_debugfs_read(struct file *file, char __user *buf,
339 size_t nbytes, loff_t *ppos)
340{
341 struct lpfc_debug *debug = file->private_data;
342 return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
343 debug->len);
344}
345
346static int
347lpfc_debugfs_release(struct inode *inode, struct file *file)
348{
349 struct lpfc_debug *debug = file->private_data;
350
351 kfree(debug->buffer);
352 kfree(debug);
353
354 return 0;
355}
356
357#undef lpfc_debugfs_op_disc_trc
358static struct file_operations lpfc_debugfs_op_disc_trc = {
359 .owner = THIS_MODULE,
360 .open = lpfc_debugfs_disc_trc_open,
361 .llseek = lpfc_debugfs_lseek,
362 .read = lpfc_debugfs_read,
363 .release = lpfc_debugfs_release,
364};
365
366#undef lpfc_debugfs_op_nodelist
367static struct file_operations lpfc_debugfs_op_nodelist = {
368 .owner = THIS_MODULE,
369 .open = lpfc_debugfs_nodelist_open,
370 .llseek = lpfc_debugfs_lseek,
371 .read = lpfc_debugfs_read,
372 .release = lpfc_debugfs_release,
373};
374
375static struct dentry *lpfc_debugfs_root = NULL;
376static atomic_t lpfc_debugfs_hba_count;
377#endif
378
379inline void
380lpfc_debugfs_initialize(struct lpfc_vport *vport)
381{
382#ifdef CONFIG_LPFC_DEBUG_FS
383 struct lpfc_hba *phba = vport->phba;
384 char name[64];
385 uint32_t num, i;
386
387 if (!lpfc_debugfs_enable)
388 return;
389
390 if (lpfc_debugfs_max_disc_trc) {
391 num = lpfc_debugfs_max_disc_trc - 1;
392 if (num & lpfc_debugfs_max_disc_trc) {
393 /* Change to be a power of 2 */
394 num = lpfc_debugfs_max_disc_trc;
395 i = 0;
396 while (num > 1) {
397 num = num >> 1;
398 i++;
399 }
400 lpfc_debugfs_max_disc_trc = (1 << i);
401 printk(KERN_ERR
402 "lpfc_debugfs_max_disc_trc changed to %d\n",
403 lpfc_debugfs_max_disc_trc);
404 }
405 }
406
407 if (!lpfc_debugfs_root) {
408 lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
409 atomic_set(&lpfc_debugfs_hba_count, 0);
410 if (!lpfc_debugfs_root)
411 goto debug_failed;
412 }
413
414 snprintf(name, sizeof(name), "lpfc%d", phba->brd_no);
415 if (!phba->hba_debugfs_root) {
416 phba->hba_debugfs_root =
417 debugfs_create_dir(name, lpfc_debugfs_root);
418 if (!phba->hba_debugfs_root)
419 goto debug_failed;
420 atomic_inc(&lpfc_debugfs_hba_count);
421 atomic_set(&phba->debugfs_vport_count, 0);
422 }
423
424 snprintf(name, sizeof(name), "vport%d", vport->vpi);
425 if (!vport->vport_debugfs_root) {
426 vport->vport_debugfs_root =
427 debugfs_create_dir(name, phba->hba_debugfs_root);
428 if (!vport->vport_debugfs_root)
429 goto debug_failed;
430 atomic_inc(&phba->debugfs_vport_count);
431 }
432
433 if (!lpfc_debugfs_start_time)
434 lpfc_debugfs_start_time = jiffies;
435
436 vport->disc_trc = kmalloc(
437 (sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc),
438 GFP_KERNEL);
439
440 if (!vport->disc_trc)
441 goto debug_failed;
442 memset(vport->disc_trc, 0,
443 (sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc));
444
445 snprintf(name, sizeof(name), "discovery_trace");
446 vport->debug_disc_trc =
447 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
448 vport->vport_debugfs_root,
449 vport, &lpfc_debugfs_op_disc_trc);
450 if (!vport->debug_disc_trc) {
451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
452 "%d:0409 Cannot create debugfs",
453 phba->brd_no);
454 goto debug_failed;
455 }
456 snprintf(name, sizeof(name), "nodelist");
457 vport->debug_nodelist =
458 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
459 vport->vport_debugfs_root,
460 vport, &lpfc_debugfs_op_nodelist);
461 if (!vport->debug_nodelist) {
462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
463 "%d:0409 Cannot create debugfs",
464 phba->brd_no);
465 goto debug_failed;
466 }
467debug_failed:
468 return;
469#endif
470}
471
472
473inline void
474lpfc_debugfs_terminate(struct lpfc_vport *vport)
475{
476#ifdef CONFIG_LPFC_DEBUG_FS
477 struct lpfc_hba *phba = vport->phba;
478
479 if (vport->disc_trc) {
480 kfree(vport->disc_trc);
481 vport->disc_trc = NULL;
482 }
483 if (vport->debug_disc_trc) {
484 debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
485 vport->debug_disc_trc = NULL;
486 }
487 if (vport->debug_nodelist) {
488 debugfs_remove(vport->debug_nodelist); /* nodelist */
489 vport->debug_nodelist = NULL;
490 }
491 if (vport->vport_debugfs_root) {
492 debugfs_remove(vport->vport_debugfs_root); /* vportX */
493 vport->vport_debugfs_root = NULL;
494 atomic_dec(&phba->debugfs_vport_count);
495 }
496 if (atomic_read(&phba->debugfs_vport_count) == 0) {
497 debugfs_remove(vport->phba->hba_debugfs_root); /* lpfcX */
498 vport->phba->hba_debugfs_root = NULL;
499 atomic_dec(&lpfc_debugfs_hba_count);
500 if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
501 debugfs_remove(lpfc_debugfs_root); /* lpfc */
502 lpfc_debugfs_root = NULL;
503 }
504 }
505#endif
506}
507
508
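
Two mechanics of the new discovery trace deserve a note: lpfc_debugfs_initialize() forces lpfc_debugfs_max_disc_trc down to a power of two, and lpfc_debugfs_disc_trc() turns an ever-increasing counter into a slot index with a simple mask, so lpfc_debugfs_disc_trc_data() can start its dump at the slot after the most recent write and walk the buffer oldest-to-newest in two passes. The following is a small self-contained C sketch of the same scheme; it is a userspace model with a fixed depth and a made-up entry format, not the driver code.

#include <stdio.h>

struct trc_entry {
	unsigned int seq;
	char msg[32];
};

#define TRC_DEPTH 8	/* power of two, like lpfc_debugfs_max_disc_trc */

static struct trc_entry trc[TRC_DEPTH];
static unsigned int trc_cnt;

/* Mirrors the rounding loop in lpfc_debugfs_initialize(): round DOWN to
 * the nearest power of two when the requested depth is not one already.
 */
static unsigned int pow2_floor(unsigned int n)
{
	unsigned int i = 0;

	if (n & (n - 1)) {
		while (n > 1) {
			n >>= 1;
			i++;
		}
		n = 1u << i;
	}
	return n;
}

/* Masked index: the counter keeps growing, the slot number wraps. */
static void trc_add(const char *msg)
{
	unsigned int idx = ++trc_cnt & (TRC_DEPTH - 1);

	trc[idx].seq = trc_cnt;
	snprintf(trc[idx].msg, sizeof(trc[idx].msg), "%s", msg);
}

/* Dump oldest-to-newest: start at the slot after the last write. */
static void trc_dump(void)
{
	unsigned int i, idx = (trc_cnt + 1) & (TRC_DEPTH - 1);

	for (i = idx; i < TRC_DEPTH; i++)
		if (trc[i].msg[0])
			printf("%010u: %s\n", trc[i].seq, trc[i].msg);
	for (i = 0; i < idx; i++)
		if (trc[i].msg[0])
			printf("%010u: %s\n", trc[i].seq, trc[i].msg);
}

int main(void)
{
	char buf[16];
	unsigned int i;

	printf("requested depth 5 becomes %u\n", pow2_floor(5));
	for (i = 0; i < 11; i++) {	/* overfill to force a wrap */
		snprintf(buf, sizeof(buf), "event %u", i);
		trc_add(buf);
	}
	trc_dump();
	return 0;
}

Keeping the depth a power of two is what makes the wrap free; with an arbitrary depth every trace call would need a modulo instead of a single AND.
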
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
new file mode 100644
index 000000000000..fffb678426a4
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -0,0 +1,50 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#ifndef _H_LPFC_DEBUG_FS
22#define _H_LPFC_DEBUG_FS
23
24#ifdef CONFIG_LPFC_DEBUG_FS
25struct lpfc_disc_trc {
26 char *fmt;
27 uint32_t data1;
28 uint32_t data2;
29 uint32_t data3;
30 uint32_t seq_cnt;
31 unsigned long jif;
32};
33#endif
34
35/* Mask for discovery_trace */
36#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
37#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */
38#define LPFC_DISC_TRC_ELS_UNSOL 0x4 /* Trace ELS rcv'ed */
39#define LPFC_DISC_TRC_ELS_ALL 0x7 /* Trace ELS */
40#define LPFC_DISC_TRC_MBOX_VPORT 0x8 /* Trace vport MBOXs */
41#define LPFC_DISC_TRC_MBOX 0x10 /* Trace other MBOXs */
42#define LPFC_DISC_TRC_MBOX_ALL 0x18 /* Trace all MBOXs */
43#define LPFC_DISC_TRC_CT 0x20 /* Trace disc CT requests */
44#define LPFC_DISC_TRC_DSM 0x40 /* Trace DSM events */
45#define LPFC_DISC_TRC_RPORT 0x80 /* Trace rport events */
46#define LPFC_DISC_TRC_NODE 0x100 /* Trace ndlp state changes */
47
48#define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general
49 * discovery */
50#endif /* _H_LPFC_DEBUG_FS */
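
The category bits above are combined by the lpfc_debugfs_mask_disc_trc module parameter and tested on every call to lpfc_debugfs_disc_trc(). A tiny illustrative sketch of that gating follows; the constants are copied from the header, while the filter function and the chosen mask value are only examples.

#include <stdio.h>

#define LPFC_DISC_TRC_ELS_CMD	0x1
#define LPFC_DISC_TRC_ELS_RSP	0x2
#define LPFC_DISC_TRC_ELS_UNSOL	0x4
#define LPFC_DISC_TRC_ELS_ALL	0x7
#define LPFC_DISC_TRC_MBOX	0x10
#define LPFC_DISC_TRC_CT	0x20

/* Example mask: trace all ELS traffic plus discovery CT requests. */
static unsigned int mask_disc_trc = LPFC_DISC_TRC_ELS_ALL | LPFC_DISC_TRC_CT;

/* Same test lpfc_debugfs_disc_trc() applies before recording an event. */
static int trace_wanted(unsigned int category)
{
	return (mask_disc_trc & category) != 0;
}

int main(void)
{
	printf("ELS cmd traced: %d\n", trace_wanted(LPFC_DISC_TRC_ELS_CMD));	/* 1 */
	printf("MBOX traced:    %d\n", trace_wanted(LPFC_DISC_TRC_MBOX));	/* 0 */
	printf("CT traced:      %d\n", trace_wanted(LPFC_DISC_TRC_CT));		/* 1 */
	return 0;
}
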
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 498059f3f7f4..aacac9ac5381 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -36,21 +36,23 @@ enum lpfc_work_type {
36 LPFC_EVT_WARM_START, 36 LPFC_EVT_WARM_START,
37 LPFC_EVT_KILL, 37 LPFC_EVT_KILL,
38 LPFC_EVT_ELS_RETRY, 38 LPFC_EVT_ELS_RETRY,
39 LPFC_EVT_DEV_LOSS_DELAY,
40 LPFC_EVT_DEV_LOSS,
39}; 41};
40 42
41/* structure used to queue event to the discovery tasklet */ 43/* structure used to queue event to the discovery tasklet */
42struct lpfc_work_evt { 44struct lpfc_work_evt {
43 struct list_head evt_listp; 45 struct list_head evt_listp;
44 void * evt_arg1; 46 void *evt_arg1;
45 void * evt_arg2; 47 void *evt_arg2;
46 enum lpfc_work_type evt; 48 enum lpfc_work_type evt;
47}; 49};
48 50
49 51
50struct lpfc_nodelist { 52struct lpfc_nodelist {
51 struct list_head nlp_listp; 53 struct list_head nlp_listp;
52 struct lpfc_name nlp_portname; /* port name */ 54 struct lpfc_name nlp_portname;
53 struct lpfc_name nlp_nodename; /* node name */ 55 struct lpfc_name nlp_nodename;
54 uint32_t nlp_flag; /* entry flags */ 56 uint32_t nlp_flag; /* entry flags */
55 uint32_t nlp_DID; /* FC D_ID of entry */ 57 uint32_t nlp_DID; /* FC D_ID of entry */
56 uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ 58 uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
@@ -75,8 +77,9 @@ struct lpfc_nodelist {
75 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ 77 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
76 struct fc_rport *rport; /* Corresponding FC transport 78 struct fc_rport *rport; /* Corresponding FC transport
77 port structure */ 79 port structure */
78 struct lpfc_hba *nlp_phba; 80 struct lpfc_vport *vport;
79 struct lpfc_work_evt els_retry_evt; 81 struct lpfc_work_evt els_retry_evt;
82 struct lpfc_work_evt dev_loss_evt;
80 unsigned long last_ramp_up_time; /* jiffy of last ramp up */ 83 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
81 unsigned long last_q_full_time; /* jiffy of last queue full */ 84 unsigned long last_q_full_time; /* jiffy of last queue full */
82 struct kref kref; 85 struct kref kref;
@@ -98,7 +101,9 @@ struct lpfc_nodelist {
98 ACC */ 101 ACC */
99#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from 102#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
100 NPR list */ 103 NPR list */
104#define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */
101#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ 105#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
106#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
102 107
103/* There are 4 different double linked lists nodelist entries can reside on. 108/* There are 4 different double linked lists nodelist entries can reside on.
104 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used 109 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 638b3cd677bd..33fbc1666946 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -35,38 +35,38 @@
35#include "lpfc.h" 35#include "lpfc.h"
36#include "lpfc_logmsg.h" 36#include "lpfc_logmsg.h"
37#include "lpfc_crtn.h" 37#include "lpfc_crtn.h"
38#include "lpfc_vport.h"
39#include "lpfc_debugfs.h"
38 40
39static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 41static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
40 struct lpfc_iocbq *); 42 struct lpfc_iocbq *);
43static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
44 struct lpfc_iocbq *);
45
41static int lpfc_max_els_tries = 3; 46static int lpfc_max_els_tries = 3;
42 47
43static int 48int
44lpfc_els_chk_latt(struct lpfc_hba * phba) 49lpfc_els_chk_latt(struct lpfc_vport *vport)
45{ 50{
46 struct lpfc_sli *psli; 51 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
47 LPFC_MBOXQ_t *mbox; 52 struct lpfc_hba *phba = vport->phba;
48 uint32_t ha_copy; 53 uint32_t ha_copy;
49 int rc;
50 54
51 psli = &phba->sli; 55 if (vport->port_state >= LPFC_VPORT_READY ||
52 56 phba->link_state == LPFC_LINK_DOWN)
53 if ((phba->hba_state >= LPFC_HBA_READY) ||
54 (phba->hba_state == LPFC_LINK_DOWN))
55 return 0; 57 return 0;
56 58
57 /* Read the HBA Host Attention Register */ 59 /* Read the HBA Host Attention Register */
58 spin_lock_irq(phba->host->host_lock);
59 ha_copy = readl(phba->HAregaddr); 60 ha_copy = readl(phba->HAregaddr);
60 spin_unlock_irq(phba->host->host_lock);
61 61
62 if (!(ha_copy & HA_LATT)) 62 if (!(ha_copy & HA_LATT))
63 return 0; 63 return 0;
64 64
65 /* Pending Link Event during Discovery */ 65 /* Pending Link Event during Discovery */
66 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY, 66 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
67 "%d:0237 Pending Link Event during " 67 "%d (%d):0237 Pending Link Event during "
68 "Discovery: State x%x\n", 68 "Discovery: State x%x\n",
69 phba->brd_no, phba->hba_state); 69 phba->brd_no, vport->vpi, phba->pport->port_state);
70 70
71 /* CLEAR_LA should re-enable link attention events and 71 /* CLEAR_LA should re-enable link attention events and
72 * we should then immediately take a LATT event. The                    72 * we should then immediately take a LATT event. The
@@ -74,48 +74,34 @@ lpfc_els_chk_latt(struct lpfc_hba * phba)
74 * will cleanup any left over in-progress discovery 74 * will cleanup any left over in-progress discovery
75 * events. 75 * events.
76 */ 76 */
77 spin_lock_irq(phba->host->host_lock); 77 spin_lock_irq(shost->host_lock);
78 phba->fc_flag |= FC_ABORT_DISCOVERY; 78 vport->fc_flag |= FC_ABORT_DISCOVERY;
79 spin_unlock_irq(phba->host->host_lock); 79 spin_unlock_irq(shost->host_lock);
80
81 if (phba->hba_state != LPFC_CLEAR_LA) {
82 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
83 phba->hba_state = LPFC_CLEAR_LA;
84 lpfc_clear_la(phba, mbox);
85 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
86 rc = lpfc_sli_issue_mbox (phba, mbox,
87 (MBX_NOWAIT | MBX_STOP_IOCB));
88 if (rc == MBX_NOT_FINISHED) {
89 mempool_free(mbox, phba->mbox_mem_pool);
90 phba->hba_state = LPFC_HBA_ERROR;
91 }
92 }
93 }
94 80
95 return 1; 81 if (phba->link_state != LPFC_CLEAR_LA)
82 lpfc_issue_clear_la(phba, vport);
96 83
84 return 1;
97} 85}
98 86
99static struct lpfc_iocbq * 87static struct lpfc_iocbq *
100lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, 88lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
101 uint16_t cmdSize, uint8_t retry, struct lpfc_nodelist * ndlp, 89 uint16_t cmdSize, uint8_t retry,
102 uint32_t did, uint32_t elscmd) 90 struct lpfc_nodelist *ndlp, uint32_t did,
91 uint32_t elscmd)
103{ 92{
104 struct lpfc_sli_ring *pring; 93 struct lpfc_hba *phba = vport->phba;
105 struct lpfc_iocbq *elsiocb; 94 struct lpfc_iocbq *elsiocb;
106 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist; 95 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
107 struct ulp_bde64 *bpl; 96 struct ulp_bde64 *bpl;
108 IOCB_t *icmd; 97 IOCB_t *icmd;
109 98
110 pring = &phba->sli.ring[LPFC_ELS_RING];
111 99
112 if (phba->hba_state < LPFC_LINK_UP) 100 if (!lpfc_is_link_up(phba))
113 return NULL; 101 return NULL;
114 102
115 /* Allocate buffer for command iocb */ 103 /* Allocate buffer for command iocb */
116 spin_lock_irq(phba->host->host_lock);
117 elsiocb = lpfc_sli_get_iocbq(phba); 104 elsiocb = lpfc_sli_get_iocbq(phba);
118 spin_unlock_irq(phba->host->host_lock);
119 105
120 if (elsiocb == NULL) 106 if (elsiocb == NULL)
121 return NULL; 107 return NULL;
@@ -123,14 +109,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
123 109
124 /* fill in BDEs for command */ 110 /* fill in BDEs for command */
125 /* Allocate buffer for command payload */ 111 /* Allocate buffer for command payload */
126 if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) || 112 if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
127 ((pcmd->virt = lpfc_mbuf_alloc(phba, 113 ((pcmd->virt = lpfc_mbuf_alloc(phba,
128 MEM_PRI, &(pcmd->phys))) == 0)) { 114 MEM_PRI, &(pcmd->phys))) == 0)) {
129 kfree(pcmd); 115 kfree(pcmd);
130 116
131 spin_lock_irq(phba->host->host_lock);
132 lpfc_sli_release_iocbq(phba, elsiocb); 117 lpfc_sli_release_iocbq(phba, elsiocb);
133 spin_unlock_irq(phba->host->host_lock);
134 return NULL; 118 return NULL;
135 } 119 }
136 120
@@ -138,7 +122,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
138 122
139 /* Allocate buffer for response payload */ 123 /* Allocate buffer for response payload */
140 if (expectRsp) { 124 if (expectRsp) {
141 prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 125 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
142 if (prsp) 126 if (prsp)
143 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 127 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
144 &prsp->phys); 128 &prsp->phys);
@@ -146,9 +130,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
146 kfree(prsp); 130 kfree(prsp);
147 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 131 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
148 kfree(pcmd); 132 kfree(pcmd);
149 spin_lock_irq(phba->host->host_lock);
150 lpfc_sli_release_iocbq(phba, elsiocb); 133 lpfc_sli_release_iocbq(phba, elsiocb);
151 spin_unlock_irq(phba->host->host_lock);
152 return NULL; 134 return NULL;
153 } 135 }
154 INIT_LIST_HEAD(&prsp->list); 136 INIT_LIST_HEAD(&prsp->list);
@@ -157,14 +139,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
157 } 139 }
158 140
159 /* Allocate buffer for Buffer ptr list */ 141 /* Allocate buffer for Buffer ptr list */
160 pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 142 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
161 if (pbuflist) 143 if (pbuflist)
162 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 144 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
163 &pbuflist->phys); 145 &pbuflist->phys);
164 if (pbuflist == 0 || pbuflist->virt == 0) { 146 if (pbuflist == 0 || pbuflist->virt == 0) {
165 spin_lock_irq(phba->host->host_lock);
166 lpfc_sli_release_iocbq(phba, elsiocb); 147 lpfc_sli_release_iocbq(phba, elsiocb);
167 spin_unlock_irq(phba->host->host_lock);
168 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 148 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
169 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 149 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
170 kfree(pcmd); 150 kfree(pcmd);
@@ -178,20 +158,28 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
178 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 158 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
179 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 159 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
180 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL; 160 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
161 icmd->un.elsreq64.remoteID = did; /* DID */
181 if (expectRsp) { 162 if (expectRsp) {
182 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); 163 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
183 icmd->un.elsreq64.remoteID = did; /* DID */
184 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 164 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
185 icmd->ulpTimeout = phba->fc_ratov * 2; 165 icmd->ulpTimeout = phba->fc_ratov * 2;
186 } else { 166 } else {
187 icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64); 167 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
188 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 168 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
189 } 169 }
190
191 icmd->ulpBdeCount = 1; 170 icmd->ulpBdeCount = 1;
192 icmd->ulpLe = 1; 171 icmd->ulpLe = 1;
193 icmd->ulpClass = CLASS3; 172 icmd->ulpClass = CLASS3;
194 173
174 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
175 icmd->un.elsreq64.myID = vport->fc_myDID;
176
177 /* For ELS_REQUEST64_CR, use the VPI by default */
178 icmd->ulpContext = vport->vpi;
179 icmd->ulpCt_h = 0;
180 icmd->ulpCt_l = 1;
181 }
182
195 bpl = (struct ulp_bde64 *) pbuflist->virt; 183 bpl = (struct ulp_bde64 *) pbuflist->virt;
196 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys)); 184 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
197 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys)); 185 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
@@ -209,10 +197,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
209 } 197 }
210 198
211 /* Save for completion so we can release these resources */ 199 /* Save for completion so we can release these resources */
212 elsiocb->context1 = lpfc_nlp_get(ndlp); 200 if (elscmd != ELS_CMD_LS_RJT)
201 elsiocb->context1 = lpfc_nlp_get(ndlp);
213 elsiocb->context2 = pcmd; 202 elsiocb->context2 = pcmd;
214 elsiocb->context3 = pbuflist; 203 elsiocb->context3 = pbuflist;
215 elsiocb->retry = retry; 204 elsiocb->retry = retry;
205 elsiocb->vport = vport;
216 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 206 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
217 207
218 if (prsp) { 208 if (prsp) {
@@ -222,16 +212,16 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
222 if (expectRsp) { 212 if (expectRsp) {
223 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 213 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
224 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 214 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
225 "%d:0116 Xmit ELS command x%x to remote " 215 "%d (%d):0116 Xmit ELS command x%x to remote "
226 "NPORT x%x I/O tag: x%x, HBA state: x%x\n", 216 "NPORT x%x I/O tag: x%x, port state: x%x\n",
227 phba->brd_no, elscmd, 217 phba->brd_no, vport->vpi, elscmd, did,
228 did, elsiocb->iotag, phba->hba_state); 218 elsiocb->iotag, vport->port_state);
229 } else { 219 } else {
230 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 220 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
231 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 221 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
232 "%d:0117 Xmit ELS response x%x to remote " 222 "%d (%d):0117 Xmit ELS response x%x to remote "
233 "NPORT x%x I/O tag: x%x, size: x%x\n", 223 "NPORT x%x I/O tag: x%x, size: x%x\n",
234 phba->brd_no, elscmd, 224 phba->brd_no, vport->vpi, elscmd,
235 ndlp->nlp_DID, elsiocb->iotag, cmdSize); 225 ndlp->nlp_DID, elsiocb->iotag, cmdSize);
236 } 226 }
237 227
@@ -240,16 +230,79 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
240 230
241 231
242static int 232static int
243lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 233lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
244 struct serv_parm *sp, IOCB_t *irsp)
245{ 234{
235 struct lpfc_hba *phba = vport->phba;
246 LPFC_MBOXQ_t *mbox; 236 LPFC_MBOXQ_t *mbox;
247 struct lpfc_dmabuf *mp; 237 struct lpfc_dmabuf *mp;
238 struct lpfc_nodelist *ndlp;
239 struct serv_parm *sp;
248 int rc; 240 int rc;
249 241
250 spin_lock_irq(phba->host->host_lock); 242 sp = &phba->fc_fabparam;
251 phba->fc_flag |= FC_FABRIC; 243 ndlp = lpfc_findnode_did(vport, Fabric_DID);
252 spin_unlock_irq(phba->host->host_lock); 244 if (!ndlp)
245 goto fail;
246
247 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
248 if (!mbox)
249 goto fail;
250
251 vport->port_state = LPFC_FABRIC_CFG_LINK;
252 lpfc_config_link(phba, mbox);
253 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
254 mbox->vport = vport;
255
256 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
257 if (rc == MBX_NOT_FINISHED)
258 goto fail_free_mbox;
259
260 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
261 if (!mbox)
262 goto fail;
263 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
264 0);
265 if (rc)
266 goto fail_free_mbox;
267
268 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
269 mbox->vport = vport;
270 mbox->context2 = lpfc_nlp_get(ndlp);
271
272 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
273 if (rc == MBX_NOT_FINISHED)
274 goto fail_issue_reg_login;
275
276 return 0;
277
278fail_issue_reg_login:
279 lpfc_nlp_put(ndlp);
280 mp = (struct lpfc_dmabuf *) mbox->context1;
281 lpfc_mbuf_free(phba, mp->virt, mp->phys);
282 kfree(mp);
283fail_free_mbox:
284 mempool_free(mbox, phba->mbox_mem_pool);
285
286fail:
287 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
288 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
289 "%d (%d):0249 Cannot issue Register Fabric login\n",
290 phba->brd_no, vport->vpi);
291 return -ENXIO;
292}
293
294static int
295lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
296 struct serv_parm *sp, IOCB_t *irsp)
297{
298 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
299 struct lpfc_hba *phba = vport->phba;
300 struct lpfc_nodelist *np;
301 struct lpfc_nodelist *next_np;
302
303 spin_lock_irq(shost->host_lock);
304 vport->fc_flag |= FC_FABRIC;
305 spin_unlock_irq(shost->host_lock);
253 306
254 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); 307 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
255 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 308 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
@@ -258,20 +311,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
258 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 311 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
259 312
260 if (phba->fc_topology == TOPOLOGY_LOOP) { 313 if (phba->fc_topology == TOPOLOGY_LOOP) {
261 spin_lock_irq(phba->host->host_lock); 314 spin_lock_irq(shost->host_lock);
262 phba->fc_flag |= FC_PUBLIC_LOOP; 315 vport->fc_flag |= FC_PUBLIC_LOOP;
263 spin_unlock_irq(phba->host->host_lock); 316 spin_unlock_irq(shost->host_lock);
264 } else { 317 } else {
265 /* 318 /*
266 * If we are a N-port connected to a Fabric, fixup sparam's so 319 * If we are a N-port connected to a Fabric, fixup sparam's so
267 * logins to devices on remote loops work. 320 * logins to devices on remote loops work.
268 */ 321 */
269 phba->fc_sparam.cmn.altBbCredit = 1; 322 vport->fc_sparam.cmn.altBbCredit = 1;
270 } 323 }
271 324
272 phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 325 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
273 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 326 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
274 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); 327 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
275 ndlp->nlp_class_sup = 0; 328 ndlp->nlp_class_sup = 0;
276 if (sp->cls1.classValid) 329 if (sp->cls1.classValid)
277 ndlp->nlp_class_sup |= FC_COS_CLASS1; 330 ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -285,68 +338,85 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
285 sp->cmn.bbRcvSizeLsb; 338 sp->cmn.bbRcvSizeLsb;
286 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 339 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
287 340
288 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 341 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
289 if (!mbox) 342 if (sp->cmn.response_multiple_NPort) {
290 goto fail; 343 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
291 344 "%d:1816 FLOGI NPIV supported, "
292 phba->hba_state = LPFC_FABRIC_CFG_LINK; 345 "response data 0x%x\n",
293 lpfc_config_link(phba, mbox); 346 phba->brd_no,
294 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 347 sp->cmn.response_multiple_NPort);
348 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
295 349
296 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 350 } else {
297 if (rc == MBX_NOT_FINISHED) 351 /* Because we asked f/w for NPIV it still expects us
298 goto fail_free_mbox; 352 to call reg_vnpid atleast for the physcial host */
299 353 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
300 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 354 "%d:1817 Fabric does not support NPIV "
301 if (!mbox) 355 "- configuring single port mode.\n",
302 goto fail; 356 phba->brd_no);
357 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
358 }
359 }
303 360
304 if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0)) 361 if ((vport->fc_prevDID != vport->fc_myDID) &&
305 goto fail_free_mbox; 362 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
306 363
307 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 364 /* If our NportID changed, we need to ensure all
308 mbox->context2 = lpfc_nlp_get(ndlp); 365 * remaining NPORTs get unreg_login'ed.
366 */
367 list_for_each_entry_safe(np, next_np,
368 &vport->fc_nodes, nlp_listp) {
369 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
370 !(np->nlp_flag & NLP_NPR_ADISC))
371 continue;
372 spin_lock_irq(shost->host_lock);
373 np->nlp_flag &= ~NLP_NPR_ADISC;
374 spin_unlock_irq(shost->host_lock);
375 lpfc_unreg_rpi(vport, np);
376 }
377 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
378 lpfc_mbx_unreg_vpi(vport);
379 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
380 }
381 }
309 382
310 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 383 ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID;
311 if (rc == MBX_NOT_FINISHED) 384 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
312 goto fail_issue_reg_login;
313 385
386 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
387 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
388 lpfc_register_new_vport(phba, vport, ndlp);
389 return 0;
390 }
391 lpfc_issue_fabric_reglogin(vport);
314 return 0; 392 return 0;
315
316 fail_issue_reg_login:
317 lpfc_nlp_put(ndlp);
318 mp = (struct lpfc_dmabuf *) mbox->context1;
319 lpfc_mbuf_free(phba, mp->virt, mp->phys);
320 kfree(mp);
321 fail_free_mbox:
322 mempool_free(mbox, phba->mbox_mem_pool);
323 fail:
324 return -ENXIO;
325} 393}
326 394
327/* 395/*
328 * We FLOGIed into an NPort, initiate pt2pt protocol 396 * We FLOGIed into an NPort, initiate pt2pt protocol
329 */ 397 */
330static int 398static int
331lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 399lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
332 struct serv_parm *sp) 400 struct serv_parm *sp)
333{ 401{
402 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
403 struct lpfc_hba *phba = vport->phba;
334 LPFC_MBOXQ_t *mbox; 404 LPFC_MBOXQ_t *mbox;
335 int rc; 405 int rc;
336 406
337 spin_lock_irq(phba->host->host_lock); 407 spin_lock_irq(shost->host_lock);
338 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 408 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
339 spin_unlock_irq(phba->host->host_lock); 409 spin_unlock_irq(shost->host_lock);
340 410
341 phba->fc_edtov = FF_DEF_EDTOV; 411 phba->fc_edtov = FF_DEF_EDTOV;
342 phba->fc_ratov = FF_DEF_RATOV; 412 phba->fc_ratov = FF_DEF_RATOV;
343 rc = memcmp(&phba->fc_portname, &sp->portName, 413 rc = memcmp(&vport->fc_portname, &sp->portName,
344 sizeof(struct lpfc_name)); 414 sizeof(vport->fc_portname));
345 if (rc >= 0) { 415 if (rc >= 0) {
346 /* This side will initiate the PLOGI */ 416 /* This side will initiate the PLOGI */
347 spin_lock_irq(phba->host->host_lock); 417 spin_lock_irq(shost->host_lock);
348 phba->fc_flag |= FC_PT2PT_PLOGI; 418 vport->fc_flag |= FC_PT2PT_PLOGI;
349 spin_unlock_irq(phba->host->host_lock); 419 spin_unlock_irq(shost->host_lock);
350 420
351 /* 421 /*
352 * N_Port ID cannot be 0, set our to LocalID the other 422 * N_Port ID cannot be 0, set our to LocalID the other
@@ -355,7 +425,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
355 425
356 /* not equal */ 426 /* not equal */
357 if (rc) 427 if (rc)
358 phba->fc_myDID = PT2PT_LocalID; 428 vport->fc_myDID = PT2PT_LocalID;
359 429
360 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 430 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
361 if (!mbox) 431 if (!mbox)
@@ -364,15 +434,16 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
364 lpfc_config_link(phba, mbox); 434 lpfc_config_link(phba, mbox);
365 435
366 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 436 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
437 mbox->vport = vport;
367 rc = lpfc_sli_issue_mbox(phba, mbox, 438 rc = lpfc_sli_issue_mbox(phba, mbox,
368 MBX_NOWAIT | MBX_STOP_IOCB); 439 MBX_NOWAIT | MBX_STOP_IOCB);
369 if (rc == MBX_NOT_FINISHED) { 440 if (rc == MBX_NOT_FINISHED) {
370 mempool_free(mbox, phba->mbox_mem_pool); 441 mempool_free(mbox, phba->mbox_mem_pool);
371 goto fail; 442 goto fail;
372 } 443 }
373 lpfc_nlp_put(ndlp); 444 lpfc_nlp_put(ndlp);
374 445
375 ndlp = lpfc_findnode_did(phba, PT2PT_RemoteID); 446 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
376 if (!ndlp) { 447 if (!ndlp) {
377 /* 448 /*
378 * Cannot find existing Fabric ndlp, so allocate a 449 * Cannot find existing Fabric ndlp, so allocate a
@@ -382,28 +453,30 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
382 if (!ndlp) 453 if (!ndlp)
383 goto fail; 454 goto fail;
384 455
385 lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID); 456 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
386 } 457 }
387 458
388 memcpy(&ndlp->nlp_portname, &sp->portName, 459 memcpy(&ndlp->nlp_portname, &sp->portName,
389 sizeof(struct lpfc_name)); 460 sizeof(struct lpfc_name));
390 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 461 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
391 sizeof(struct lpfc_name)); 462 sizeof(struct lpfc_name));
392 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 463 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
464 spin_lock_irq(shost->host_lock);
393 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 465 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
466 spin_unlock_irq(shost->host_lock);
394 } else { 467 } else {
395 /* This side will wait for the PLOGI */ 468 /* This side will wait for the PLOGI */
396 lpfc_nlp_put(ndlp); 469 lpfc_nlp_put(ndlp);
397 } 470 }
398 471
399 spin_lock_irq(phba->host->host_lock); 472 spin_lock_irq(shost->host_lock);
400 phba->fc_flag |= FC_PT2PT; 473 vport->fc_flag |= FC_PT2PT;
401 spin_unlock_irq(phba->host->host_lock); 474 spin_unlock_irq(shost->host_lock);
402 475
403 /* Start discovery - this should just do CLEAR_LA */ 476 /* Start discovery - this should just do CLEAR_LA */
404 lpfc_disc_start(phba); 477 lpfc_disc_start(vport);
405 return 0; 478 return 0;
406 fail: 479fail:
407 return -ENXIO; 480 return -ENXIO;
408} 481}
409 482
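
For the point-to-point case handled above, lpfc_cmpl_els_flogi_nport() breaks the tie between the two N_Ports by memcmp()'ing the local port name against the one carried in the FLOGI payload: a greater-or-equal result means this side issues the PLOGI, and a strictly greater result additionally claims PT2PT_LocalID as the local N_Port ID. A standalone sketch of that decision is below; the WWPN bytes and the two ID constants are stand-in values chosen for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PT2PT_LocalID	1	/* stand-in values for illustration */
#define PT2PT_RemoteID	2

int main(void)
{
	/* WWPN bytes below are made up for the example. */
	uint8_t my_wwpn[8]     = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 };
	uint8_t remote_wwpn[8] = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x00, 0x00, 0x01 };
	int rc = memcmp(my_wwpn, remote_wwpn, sizeof(my_wwpn));

	if (rc >= 0) {
		printf("this side initiates the PLOGI\n");
		if (rc > 0)	/* only a strictly greater name claims the ID */
			printf("local N_Port ID set to PT2PT_LocalID (%d)\n",
			       PT2PT_LocalID);
	} else {
		printf("this side waits for the peer's PLOGI\n");
	}
	return 0;
}
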
@@ -411,6 +484,8 @@ static void
411lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 484lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
412 struct lpfc_iocbq *rspiocb) 485 struct lpfc_iocbq *rspiocb)
413{ 486{
487 struct lpfc_vport *vport = cmdiocb->vport;
488 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
414 IOCB_t *irsp = &rspiocb->iocb; 489 IOCB_t *irsp = &rspiocb->iocb;
415 struct lpfc_nodelist *ndlp = cmdiocb->context1; 490 struct lpfc_nodelist *ndlp = cmdiocb->context1;
416 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 491 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
@@ -418,21 +493,25 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
418 int rc; 493 int rc;
419 494
420 /* Check to see if link went down during discovery */ 495 /* Check to see if link went down during discovery */
421 if (lpfc_els_chk_latt(phba)) { 496 if (lpfc_els_chk_latt(vport)) {
422 lpfc_nlp_put(ndlp); 497 lpfc_nlp_put(ndlp);
423 goto out; 498 goto out;
424 } 499 }
425 500
501 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
502 "FLOGI cmpl: status:x%x/x%x state:x%x",
503 irsp->ulpStatus, irsp->un.ulpWord[4],
504 vport->port_state);
505
426 if (irsp->ulpStatus) { 506 if (irsp->ulpStatus) {
427 /* Check for retry */ 507 /* Check for retry */
428 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 508 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
429 /* ELS command is being retried */
430 goto out; 509 goto out;
431 } 510
432 /* FLOGI failed, so there is no fabric */ 511 /* FLOGI failed, so there is no fabric */
433 spin_lock_irq(phba->host->host_lock); 512 spin_lock_irq(shost->host_lock);
434 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 513 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
435 spin_unlock_irq(phba->host->host_lock); 514 spin_unlock_irq(shost->host_lock);
436 515
437 /* If private loop, then allow max outstanding els to be 516 /* If private loop, then allow max outstanding els to be
438 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 517 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
@@ -443,11 +522,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
443 } 522 }
444 523
445 /* FLOGI failure */ 524 /* FLOGI failure */
446 lpfc_printf_log(phba, 525 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
447 KERN_INFO, 526 "%d (%d):0100 FLOGI failure Data: x%x x%x "
448 LOG_ELS, 527 "x%x\n",
449 "%d:0100 FLOGI failure Data: x%x x%x x%x\n", 528 phba->brd_no, vport->vpi,
450 phba->brd_no,
451 irsp->ulpStatus, irsp->un.ulpWord[4], 529 irsp->ulpStatus, irsp->un.ulpWord[4],
452 irsp->ulpTimeout); 530 irsp->ulpTimeout);
453 goto flogifail; 531 goto flogifail;
@@ -463,21 +541,21 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
463 541
464 /* FLOGI completes successfully */ 542 /* FLOGI completes successfully */
465 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 543 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
466			"%d:0101 FLOGI completes successfully "          544			 "%d (%d):0101 FLOGI completes successfully "
467 "Data: x%x x%x x%x x%x\n", 545 "Data: x%x x%x x%x x%x\n",
468 phba->brd_no, 546 phba->brd_no, vport->vpi,
469 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 547 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
470 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); 548 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
471 549
472 if (phba->hba_state == LPFC_FLOGI) { 550 if (vport->port_state == LPFC_FLOGI) {
473 /* 551 /*
474 * If Common Service Parameters indicate Nport 552 * If Common Service Parameters indicate Nport
475 * we are point to point, if Fport we are Fabric. 553 * we are point to point, if Fport we are Fabric.
476 */ 554 */
477 if (sp->cmn.fPort) 555 if (sp->cmn.fPort)
478 rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp); 556 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
479 else 557 else
480 rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp); 558 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
481 559
482 if (!rc) 560 if (!rc)
483 goto out; 561 goto out;
@@ -486,14 +564,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
486flogifail: 564flogifail:
487 lpfc_nlp_put(ndlp); 565 lpfc_nlp_put(ndlp);
488 566
489 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT || 567 if (!lpfc_error_lost_link(irsp)) {
490 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
491 irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) {
492 /* FLOGI failed, so just use loop map to make discovery list */ 568 /* FLOGI failed, so just use loop map to make discovery list */
493 lpfc_disc_list_loopmap(phba); 569 lpfc_disc_list_loopmap(vport);
494 570
495 /* Start discovery */ 571 /* Start discovery */
496 lpfc_disc_start(phba); 572 lpfc_disc_start(vport);
497 } 573 }
498 574
499out: 575out:
@@ -501,9 +577,10 @@ out:
501} 577}
502 578
503static int 579static int
504lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 580lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
505 uint8_t retry) 581 uint8_t retry)
506{ 582{
583 struct lpfc_hba *phba = vport->phba;
507 struct serv_parm *sp; 584 struct serv_parm *sp;
508 IOCB_t *icmd; 585 IOCB_t *icmd;
509 struct lpfc_iocbq *elsiocb; 586 struct lpfc_iocbq *elsiocb;
@@ -515,9 +592,10 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
515 592
516 pring = &phba->sli.ring[LPFC_ELS_RING]; 593 pring = &phba->sli.ring[LPFC_ELS_RING];
517 594
518 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm)); 595 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
519 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, 596 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
520 ndlp->nlp_DID, ELS_CMD_FLOGI); 597 ndlp->nlp_DID, ELS_CMD_FLOGI);
598
521 if (!elsiocb) 599 if (!elsiocb)
522 return 1; 600 return 1;
523 601
@@ -526,8 +604,8 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
526 604
527 /* For FLOGI request, remainder of payload is service parameters */ 605 /* For FLOGI request, remainder of payload is service parameters */
528 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 606 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
529 pcmd += sizeof (uint32_t); 607 pcmd += sizeof(uint32_t);
530 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); 608 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
531 sp = (struct serv_parm *) pcmd; 609 sp = (struct serv_parm *) pcmd;
532 610
533 /* Setup CSPs accordingly for Fabric */ 611 /* Setup CSPs accordingly for Fabric */
@@ -541,16 +619,32 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
541 if (sp->cmn.fcphHigh < FC_PH3) 619 if (sp->cmn.fcphHigh < FC_PH3)
542 sp->cmn.fcphHigh = FC_PH3; 620 sp->cmn.fcphHigh = FC_PH3;
543 621
622 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
623 sp->cmn.request_multiple_Nport = 1;
624
625 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
626 icmd->ulpCt_h = 1;
627 icmd->ulpCt_l = 0;
628 }
629
630 if (phba->fc_topology != TOPOLOGY_LOOP) {
631 icmd->un.elsreq64.myID = 0;
632 icmd->un.elsreq64.fl = 1;
633 }
634
544 tmo = phba->fc_ratov; 635 tmo = phba->fc_ratov;
545 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 636 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
546 lpfc_set_disctmo(phba); 637 lpfc_set_disctmo(vport);
547 phba->fc_ratov = tmo; 638 phba->fc_ratov = tmo;
548 639
549 phba->fc_stat.elsXmitFLOGI++; 640 phba->fc_stat.elsXmitFLOGI++;
550 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi; 641 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
551 spin_lock_irq(phba->host->host_lock); 642
552 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 643 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
553 spin_unlock_irq(phba->host->host_lock); 644 "Issue FLOGI: opt:x%x",
645 phba->sli3_options, 0, 0);
646
647 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
554 if (rc == IOCB_ERROR) { 648 if (rc == IOCB_ERROR) {
555 lpfc_els_free_iocb(phba, elsiocb); 649 lpfc_els_free_iocb(phba, elsiocb);
556 return 1; 650 return 1;
@@ -559,7 +653,7 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
559} 653}
560 654
561int 655int
562lpfc_els_abort_flogi(struct lpfc_hba * phba) 656lpfc_els_abort_flogi(struct lpfc_hba *phba)
563{ 657{
564 struct lpfc_sli_ring *pring; 658 struct lpfc_sli_ring *pring;
565 struct lpfc_iocbq *iocb, *next_iocb; 659 struct lpfc_iocbq *iocb, *next_iocb;
@@ -577,73 +671,99 @@ lpfc_els_abort_flogi(struct lpfc_hba * phba)
577 * Check the txcmplq for an iocb that matches the nport the driver is 671 * Check the txcmplq for an iocb that matches the nport the driver is
578 * searching for. 672 * searching for.
579 */ 673 */
580 spin_lock_irq(phba->host->host_lock); 674 spin_lock_irq(&phba->hbalock);
581 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 675 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
582 icmd = &iocb->iocb; 676 icmd = &iocb->iocb;
583 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 677 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
678 icmd->un.elsreq64.bdl.ulpIoTag32) {
584 ndlp = (struct lpfc_nodelist *)(iocb->context1); 679 ndlp = (struct lpfc_nodelist *)(iocb->context1);
585 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) 680 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
586 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 681 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
682 }
587 } 683 }
588 } 684 }
589 spin_unlock_irq(phba->host->host_lock); 685 spin_unlock_irq(&phba->hbalock);
590 686
591 return 0; 687 return 0;
592} 688}
593 689
594int 690int
595lpfc_initial_flogi(struct lpfc_hba *phba) 691lpfc_initial_flogi(struct lpfc_vport *vport)
596{ 692{
693 struct lpfc_hba *phba = vport->phba;
597 struct lpfc_nodelist *ndlp; 694 struct lpfc_nodelist *ndlp;
598 695
599 /* First look for the Fabric ndlp */ 696 /* First look for the Fabric ndlp */
600 ndlp = lpfc_findnode_did(phba, Fabric_DID); 697 ndlp = lpfc_findnode_did(vport, Fabric_DID);
601 if (!ndlp) { 698 if (!ndlp) {
602 /* Cannot find existing Fabric ndlp, so allocate a new one */ 699 /* Cannot find existing Fabric ndlp, so allocate a new one */
603 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 700 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
604 if (!ndlp) 701 if (!ndlp)
605 return 0; 702 return 0;
606 lpfc_nlp_init(phba, ndlp, Fabric_DID); 703 lpfc_nlp_init(vport, ndlp, Fabric_DID);
607 } else { 704 } else {
608 lpfc_dequeue_node(phba, ndlp); 705 lpfc_dequeue_node(vport, ndlp);
609 } 706 }
610 if (lpfc_issue_els_flogi(phba, ndlp, 0)) { 707 if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
611 lpfc_nlp_put(ndlp); 708 lpfc_nlp_put(ndlp);
612 } 709 }
613 return 1; 710 return 1;
614} 711}
615 712
713int
714lpfc_initial_fdisc(struct lpfc_vport *vport)
715{
716 struct lpfc_hba *phba = vport->phba;
717 struct lpfc_nodelist *ndlp;
718
719 /* First look for the Fabric ndlp */
720 ndlp = lpfc_findnode_did(vport, Fabric_DID);
721 if (!ndlp) {
722 /* Cannot find existing Fabric ndlp, so allocate a new one */
723 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
724 if (!ndlp)
725 return 0;
726 lpfc_nlp_init(vport, ndlp, Fabric_DID);
727 } else {
728 lpfc_dequeue_node(vport, ndlp);
729 }
730 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
731 lpfc_nlp_put(ndlp);
732 }
733 return 1;
734}
616static void 735static void
617lpfc_more_plogi(struct lpfc_hba * phba) 736lpfc_more_plogi(struct lpfc_vport *vport)
618{ 737{
619 int sentplogi; 738 int sentplogi;
739 struct lpfc_hba *phba = vport->phba;
620 740
621 if (phba->num_disc_nodes) 741 if (vport->num_disc_nodes)
622 phba->num_disc_nodes--; 742 vport->num_disc_nodes--;
623 743
624 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 744 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
625 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 745 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
626 "%d:0232 Continue discovery with %d PLOGIs to go " 746 "%d (%d):0232 Continue discovery with %d PLOGIs to go "
627 "Data: x%x x%x x%x\n", 747 "Data: x%x x%x x%x\n",
628 phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt, 748 phba->brd_no, vport->vpi, vport->num_disc_nodes,
629 phba->fc_flag, phba->hba_state); 749 vport->fc_plogi_cnt, vport->fc_flag, vport->port_state);
630 750
631 /* Check to see if there are more PLOGIs to be sent */ 751 /* Check to see if there are more PLOGIs to be sent */
632 if (phba->fc_flag & FC_NLP_MORE) { 752 if (vport->fc_flag & FC_NLP_MORE)
633 /* go thru NPR list and issue any remaining ELS PLOGIs */ 753 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
634 sentplogi = lpfc_els_disc_plogi(phba); 754 sentplogi = lpfc_els_disc_plogi(vport);
635 } 755
636 return; 756 return;
637} 757}
638 758
639static struct lpfc_nodelist * 759static struct lpfc_nodelist *
640lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp, 760lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
641 struct lpfc_nodelist *ndlp) 761 struct lpfc_nodelist *ndlp)
642{ 762{
763 struct lpfc_vport *vport = ndlp->vport;
643 struct lpfc_nodelist *new_ndlp; 764 struct lpfc_nodelist *new_ndlp;
644 uint32_t *lp;
645 struct serv_parm *sp; 765 struct serv_parm *sp;
646 uint8_t name[sizeof (struct lpfc_name)]; 766 uint8_t name[sizeof(struct lpfc_name)];
647 uint32_t rc; 767 uint32_t rc;
648 768
649 /* Fabric nodes can have the same WWPN so we don't bother searching 769 /* Fabric nodes can have the same WWPN so we don't bother searching
@@ -652,50 +772,51 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
652 if (ndlp->nlp_type & NLP_FABRIC) 772 if (ndlp->nlp_type & NLP_FABRIC)
653 return ndlp; 773 return ndlp;
654 774
655 lp = (uint32_t *) prsp->virt; 775 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
656 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
657 memset(name, 0, sizeof(struct lpfc_name)); 776 memset(name, 0, sizeof(struct lpfc_name));
658 777
659 /* Now we find out if the NPort we are logging into, matches the WWPN 778 /* Now we find out if the NPort we are logging into, matches the WWPN
660 * we have for that ndlp. If not, we have some work to do. 779 * we have for that ndlp. If not, we have some work to do.
661 */ 780 */
662 new_ndlp = lpfc_findnode_wwpn(phba, &sp->portName); 781 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
663 782
664 if (new_ndlp == ndlp) 783 if (new_ndlp == ndlp)
665 return ndlp; 784 return ndlp;
666 785
667 if (!new_ndlp) { 786 if (!new_ndlp) {
668 rc = 787 rc = memcmp(&ndlp->nlp_portname, name,
669 memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); 788 sizeof(struct lpfc_name));
670 if (!rc) 789 if (!rc)
671 return ndlp; 790 return ndlp;
672 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 791 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
673 if (!new_ndlp) 792 if (!new_ndlp)
674 return ndlp; 793 return ndlp;
675 794
676 lpfc_nlp_init(phba, new_ndlp, ndlp->nlp_DID); 795 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
677 } 796 }
678 797
679 lpfc_unreg_rpi(phba, new_ndlp); 798 lpfc_unreg_rpi(vport, new_ndlp);
680 new_ndlp->nlp_DID = ndlp->nlp_DID; 799 new_ndlp->nlp_DID = ndlp->nlp_DID;
681 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 800 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
682 lpfc_nlp_set_state(phba, new_ndlp, ndlp->nlp_state); 801 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
683 802
684 /* Move this back to NPR list */ 803 /* Move this back to NPR state */
685 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) 804 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0)
686 lpfc_drop_node(phba, ndlp); 805 lpfc_drop_node(vport, ndlp);
687 else { 806 else {
688 lpfc_unreg_rpi(phba, ndlp); 807 lpfc_unreg_rpi(vport, ndlp);
689 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ 808 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
690 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 809 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
691 } 810 }
692 return new_ndlp; 811 return new_ndlp;
693} 812}
694 813
695static void 814static void
696lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 815lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
697 struct lpfc_iocbq * rspiocb) 816 struct lpfc_iocbq *rspiocb)
698{ 817{
818 struct lpfc_vport *vport = cmdiocb->vport;
819 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
699 IOCB_t *irsp; 820 IOCB_t *irsp;
700 struct lpfc_nodelist *ndlp; 821 struct lpfc_nodelist *ndlp;
701 struct lpfc_dmabuf *prsp; 822 struct lpfc_dmabuf *prsp;
@@ -705,32 +826,43 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
705 cmdiocb->context_un.rsp_iocb = rspiocb; 826 cmdiocb->context_un.rsp_iocb = rspiocb;
706 827
707 irsp = &rspiocb->iocb; 828 irsp = &rspiocb->iocb;
708 ndlp = lpfc_findnode_did(phba, irsp->un.elsreq64.remoteID); 829 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
709 if (!ndlp) 830 "PLOGI cmpl: status:x%x/x%x did:x%x",
831 irsp->ulpStatus, irsp->un.ulpWord[4],
832 irsp->un.elsreq64.remoteID);
833
834 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
835 if (!ndlp) {
836 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
837 "%d (%d):0136 PLOGI completes to NPort x%x "
838 "with no ndlp. Data: x%x x%x x%x\n",
839 phba->brd_no, vport->vpi, irsp->un.elsreq64.remoteID,
840 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpIoTag);
710 goto out; 841 goto out;
842 }
711 843
712 /* Since ndlp can be freed in the disc state machine, note if this node 844 /* Since ndlp can be freed in the disc state machine, note if this node
713 * is being used during discovery. 845 * is being used during discovery.
714 */ 846 */
847 spin_lock_irq(shost->host_lock);
715 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 848 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
716 spin_lock_irq(phba->host->host_lock);
717 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 849 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
718 spin_unlock_irq(phba->host->host_lock); 850 spin_unlock_irq(shost->host_lock);
719 rc = 0; 851 rc = 0;
720 852
721 /* PLOGI completes to NPort <nlp_DID> */ 853 /* PLOGI completes to NPort <nlp_DID> */
722 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 854 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
723 "%d:0102 PLOGI completes to NPort x%x " 855 "%d (%d):0102 PLOGI completes to NPort x%x "
724 "Data: x%x x%x x%x x%x x%x\n", 856 "Data: x%x x%x x%x x%x x%x\n",
725 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, 857 phba->brd_no, vport->vpi, ndlp->nlp_DID,
726 irsp->un.ulpWord[4], irsp->ulpTimeout, disc, 858 irsp->ulpStatus, irsp->un.ulpWord[4],
727 phba->num_disc_nodes); 859 irsp->ulpTimeout, disc, vport->num_disc_nodes);
728 860
729 /* Check to see if link went down during discovery */ 861 /* Check to see if link went down during discovery */
730 if (lpfc_els_chk_latt(phba)) { 862 if (lpfc_els_chk_latt(vport)) {
731 spin_lock_irq(phba->host->host_lock); 863 spin_lock_irq(shost->host_lock);
732 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 864 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
733 spin_unlock_irq(phba->host->host_lock); 865 spin_unlock_irq(shost->host_lock);
734 goto out; 866 goto out;
735 } 867 }
736 868
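
The completion handlers in this hunk (and the ones that follow) stop taking the per-HBA phba->host->host_lock and instead take shost->host_lock, where shost comes from lpfc_shost_from_vport(vport). The helper's definition is not part of this diff; a minimal sketch, assuming the lpfc_vport is embedded in the owning Scsi_Host's hostdata area:

	static inline struct Scsi_Host *
	lpfc_shost_from_vport(struct lpfc_vport *vport)
	{
		/* Assumption: the vport structure is allocated inside
		 * shost->hostdata, so the owning Scsi_Host can be recovered
		 * with container_of().  The real definition lives in lpfc.h.
		 */
		return container_of((void *) vport, struct Scsi_Host,
				    hostdata[0]);
	}
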
@@ -743,56 +875,62 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
743 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 875 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
744 /* ELS command is being retried */ 876 /* ELS command is being retried */
745 if (disc) { 877 if (disc) {
746 spin_lock_irq(phba->host->host_lock); 878 spin_lock_irq(shost->host_lock);
747 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 879 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
748 spin_unlock_irq(phba->host->host_lock); 880 spin_unlock_irq(shost->host_lock);
749 } 881 }
750 goto out; 882 goto out;
751 } 883 }
752 884
753 /* PLOGI failed */ 885 /* PLOGI failed */
886 if (ndlp->nlp_DID == NameServer_DID) {
887 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
888 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
889 "%d (%d):0250 Nameserver login error: "
890 "0x%x / 0x%x\n",
891 phba->brd_no, vport->vpi,
892 irsp->ulpStatus, irsp->un.ulpWord[4]);
893 }
894
754 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 895 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
755 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 896 if (lpfc_error_lost_link(irsp)) {
756 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
757 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
758 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
759 rc = NLP_STE_FREED_NODE; 897 rc = NLP_STE_FREED_NODE;
760 } else { 898 } else {
761 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, 899 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
762 NLP_EVT_CMPL_PLOGI); 900 NLP_EVT_CMPL_PLOGI);
763 } 901 }
764 } else { 902 } else {
765 /* Good status, call state machine */ 903 /* Good status, call state machine */
766 prsp = list_entry(((struct lpfc_dmabuf *) 904 prsp = list_entry(((struct lpfc_dmabuf *)
767 cmdiocb->context2)->list.next, 905 cmdiocb->context2)->list.next,
768 struct lpfc_dmabuf, list); 906 struct lpfc_dmabuf, list);
769 ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp); 907 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
770 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, 908 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
771 NLP_EVT_CMPL_PLOGI); 909 NLP_EVT_CMPL_PLOGI);
772 } 910 }
773 911
774 if (disc && phba->num_disc_nodes) { 912 if (disc && vport->num_disc_nodes) {
775 /* Check to see if there are more PLOGIs to be sent */ 913 /* Check to see if there are more PLOGIs to be sent */
776 lpfc_more_plogi(phba); 914 lpfc_more_plogi(vport);
777 915
778 if (phba->num_disc_nodes == 0) { 916 if (vport->num_disc_nodes == 0) {
779 spin_lock_irq(phba->host->host_lock); 917 spin_lock_irq(shost->host_lock);
780 phba->fc_flag &= ~FC_NDISC_ACTIVE; 918 vport->fc_flag &= ~FC_NDISC_ACTIVE;
781 spin_unlock_irq(phba->host->host_lock); 919 spin_unlock_irq(shost->host_lock);
782 920
783 lpfc_can_disctmo(phba); 921 lpfc_can_disctmo(vport);
784 if (phba->fc_flag & FC_RSCN_MODE) { 922 if (vport->fc_flag & FC_RSCN_MODE) {
785 /* 923 /*
786 * Check to see if more RSCNs came in while 924 * Check to see if more RSCNs came in while
787 * we were processing this one. 925 * we were processing this one.
788 */ 926 */
789 if ((phba->fc_rscn_id_cnt == 0) && 927 if ((vport->fc_rscn_id_cnt == 0) &&
790 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { 928 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
791 spin_lock_irq(phba->host->host_lock); 929 spin_lock_irq(shost->host_lock);
792 phba->fc_flag &= ~FC_RSCN_MODE; 930 vport->fc_flag &= ~FC_RSCN_MODE;
793 spin_unlock_irq(phba->host->host_lock); 931 spin_unlock_irq(shost->host_lock);
794 } else { 932 } else {
795 lpfc_els_handle_rscn(phba); 933 lpfc_els_handle_rscn(vport);
796 } 934 }
797 } 935 }
798 } 936 }
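
The open-coded IOSTAT_LOCAL_REJECT test for IOERR_SLI_ABORTED, IOERR_LINK_DOWN and IOERR_SLI_DOWN that this hunk removes is replaced by lpfc_error_lost_link(). The helper's body is not shown in this file; a sketch reconstructed from the removed check (the real definition lives elsewhere in the driver and may recognize further error codes):

	/* Sketch inferred from the open-coded test this patch removes. */
	static int
	lpfc_error_lost_link(IOCB_t *irsp)
	{
		return irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		       (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
			irsp->un.ulpWord[4] == IOERR_LINK_DOWN ||
			irsp->un.ulpWord[4] == IOERR_SLI_DOWN);
	}
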
@@ -804,8 +942,9 @@ out:
804} 942}
805 943
806int 944int
807lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry) 945lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
808{ 946{
947 struct lpfc_hba *phba = vport->phba;
809 struct serv_parm *sp; 948 struct serv_parm *sp;
810 IOCB_t *icmd; 949 IOCB_t *icmd;
811 struct lpfc_iocbq *elsiocb; 950 struct lpfc_iocbq *elsiocb;
@@ -813,13 +952,14 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
813 struct lpfc_sli *psli; 952 struct lpfc_sli *psli;
814 uint8_t *pcmd; 953 uint8_t *pcmd;
815 uint16_t cmdsize; 954 uint16_t cmdsize;
955 int ret;
816 956
817 psli = &phba->sli; 957 psli = &phba->sli;
818 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 958 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
819 959
820 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm)); 960 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
821 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, NULL, did, 961 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did,
822 ELS_CMD_PLOGI); 962 ELS_CMD_PLOGI);
823 if (!elsiocb) 963 if (!elsiocb)
824 return 1; 964 return 1;
825 965
@@ -828,8 +968,8 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
828 968
829 /* For PLOGI request, remainder of payload is service parameters */ 969 /* For PLOGI request, remainder of payload is service parameters */
830 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 970 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
831 pcmd += sizeof (uint32_t); 971 pcmd += sizeof(uint32_t);
832 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); 972 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
833 sp = (struct serv_parm *) pcmd; 973 sp = (struct serv_parm *) pcmd;
834 974
835 if (sp->cmn.fcphLow < FC_PH_4_3) 975 if (sp->cmn.fcphLow < FC_PH_4_3)
@@ -838,22 +978,27 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
838 if (sp->cmn.fcphHigh < FC_PH3) 978 if (sp->cmn.fcphHigh < FC_PH3)
839 sp->cmn.fcphHigh = FC_PH3; 979 sp->cmn.fcphHigh = FC_PH3;
840 980
981 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
982 "Issue PLOGI: did:x%x",
983 did, 0, 0);
984
841 phba->fc_stat.elsXmitPLOGI++; 985 phba->fc_stat.elsXmitPLOGI++;
842 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 986 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
843 spin_lock_irq(phba->host->host_lock); 987 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
844 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 988
845 spin_unlock_irq(phba->host->host_lock); 989 if (ret == IOCB_ERROR) {
846 lpfc_els_free_iocb(phba, elsiocb); 990 lpfc_els_free_iocb(phba, elsiocb);
847 return 1; 991 return 1;
848 } 992 }
849 spin_unlock_irq(phba->host->host_lock);
850 return 0; 993 return 0;
851} 994}
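
In lpfc_issue_els_plogi() above, and in the other transmit routines below, the spin_lock_irq(phba->host->host_lock) bracketing around lpfc_sli_issue_iocb() is dropped, which implies the SLI layer now serializes ring access on its own, presumably with the phba->hbalock that lpfc_els_retry_delay() uses later in this patch. A rough sketch of that assumed arrangement; the two-level lpfc_sli_issue_iocb()/__lpfc_sli_issue_iocb() split is an assumption, not something this diff shows:

	/* Assumed locking model: the entry point takes the HBA lock itself,
	 * so ELS callers no longer wrap the call in the Scsi_Host lock.
	 */
	int
	lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *piocb, uint32_t flag)
	{
		unsigned long iflags;
		int rc;

		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return rc;
	}
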
852 995
853static void 996static void
854lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 997lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
855 struct lpfc_iocbq * rspiocb) 998 struct lpfc_iocbq *rspiocb)
856{ 999{
1000 struct lpfc_vport *vport = cmdiocb->vport;
1001 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
857 IOCB_t *irsp; 1002 IOCB_t *irsp;
858 struct lpfc_sli *psli; 1003 struct lpfc_sli *psli;
859 struct lpfc_nodelist *ndlp; 1004 struct lpfc_nodelist *ndlp;
@@ -864,21 +1009,26 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
864 1009
865 irsp = &(rspiocb->iocb); 1010 irsp = &(rspiocb->iocb);
866 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1011 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
867 spin_lock_irq(phba->host->host_lock); 1012 spin_lock_irq(shost->host_lock);
868 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1013 ndlp->nlp_flag &= ~NLP_PRLI_SND;
869 spin_unlock_irq(phba->host->host_lock); 1014 spin_unlock_irq(shost->host_lock);
1015
1016 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1017 "PRLI cmpl: status:x%x/x%x did:x%x",
1018 irsp->ulpStatus, irsp->un.ulpWord[4],
1019 ndlp->nlp_DID);
870 1020
871 /* PRLI completes to NPort <nlp_DID> */ 1021 /* PRLI completes to NPort <nlp_DID> */
872 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1022 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
873 "%d:0103 PRLI completes to NPort x%x " 1023 "%d (%d):0103 PRLI completes to NPort x%x "
874 "Data: x%x x%x x%x x%x\n", 1024 "Data: x%x x%x x%x x%x\n",
875 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, 1025 phba->brd_no, vport->vpi, ndlp->nlp_DID,
876 irsp->un.ulpWord[4], irsp->ulpTimeout, 1026 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
877 phba->num_disc_nodes); 1027 vport->num_disc_nodes);
878 1028
879 phba->fc_prli_sent--; 1029 vport->fc_prli_sent--;
880 /* Check to see if link went down during discovery */ 1030 /* Check to see if link went down during discovery */
881 if (lpfc_els_chk_latt(phba)) 1031 if (lpfc_els_chk_latt(vport))
882 goto out; 1032 goto out;
883 1033
884 if (irsp->ulpStatus) { 1034 if (irsp->ulpStatus) {
@@ -889,18 +1039,16 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
889 } 1039 }
890 /* PRLI failed */ 1040 /* PRLI failed */
891 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1041 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
892 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1042 if (lpfc_error_lost_link(irsp)) {
893 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
894 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
895 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
896 goto out; 1043 goto out;
897 } else { 1044 } else {
898 lpfc_disc_state_machine(phba, ndlp, cmdiocb, 1045 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
899 NLP_EVT_CMPL_PRLI); 1046 NLP_EVT_CMPL_PRLI);
900 } 1047 }
901 } else { 1048 } else {
902 /* Good status, call state machine */ 1049 /* Good status, call state machine */
903 lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); 1050 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1051 NLP_EVT_CMPL_PRLI);
904 } 1052 }
905 1053
906out: 1054out:
@@ -909,9 +1057,11 @@ out:
909} 1057}
910 1058
911int 1059int
912lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 1060lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
913 uint8_t retry) 1061 uint8_t retry)
914{ 1062{
1063 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1064 struct lpfc_hba *phba = vport->phba;
915 PRLI *npr; 1065 PRLI *npr;
916 IOCB_t *icmd; 1066 IOCB_t *icmd;
917 struct lpfc_iocbq *elsiocb; 1067 struct lpfc_iocbq *elsiocb;
@@ -923,9 +1073,9 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
923 psli = &phba->sli; 1073 psli = &phba->sli;
924 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1074 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
925 1075
926 cmdsize = (sizeof (uint32_t) + sizeof (PRLI)); 1076 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
927 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, 1077 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
928 ndlp->nlp_DID, ELS_CMD_PRLI); 1078 ndlp->nlp_DID, ELS_CMD_PRLI);
929 if (!elsiocb) 1079 if (!elsiocb)
930 return 1; 1080 return 1;
931 1081
@@ -933,9 +1083,9 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
933 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1083 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
934 1084
935 /* For PRLI request, remainder of payload is service parameters */ 1085 /* For PRLI request, remainder of payload is service parameters */
936 memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t))); 1086 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
937 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI; 1087 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
938 pcmd += sizeof (uint32_t); 1088 pcmd += sizeof(uint32_t);
939 1089
940 /* For PRLI, remainder of payload is PRLI parameter page */ 1090 /* For PRLI, remainder of payload is PRLI parameter page */
941 npr = (PRLI *) pcmd; 1091 npr = (PRLI *) pcmd;
@@ -955,81 +1105,88 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
955 npr->prliType = PRLI_FCP_TYPE; 1105 npr->prliType = PRLI_FCP_TYPE;
956 npr->initiatorFunc = 1; 1106 npr->initiatorFunc = 1;
957 1107
1108 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1109 "Issue PRLI: did:x%x",
1110 ndlp->nlp_DID, 0, 0);
1111
958 phba->fc_stat.elsXmitPRLI++; 1112 phba->fc_stat.elsXmitPRLI++;
959 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 1113 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
960 spin_lock_irq(phba->host->host_lock); 1114 spin_lock_irq(shost->host_lock);
961 ndlp->nlp_flag |= NLP_PRLI_SND; 1115 ndlp->nlp_flag |= NLP_PRLI_SND;
1116 spin_unlock_irq(shost->host_lock);
962 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1117 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1118 spin_lock_irq(shost->host_lock);
963 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1119 ndlp->nlp_flag &= ~NLP_PRLI_SND;
964 spin_unlock_irq(phba->host->host_lock); 1120 spin_unlock_irq(shost->host_lock);
965 lpfc_els_free_iocb(phba, elsiocb); 1121 lpfc_els_free_iocb(phba, elsiocb);
966 return 1; 1122 return 1;
967 } 1123 }
968 spin_unlock_irq(phba->host->host_lock); 1124 vport->fc_prli_sent++;
969 phba->fc_prli_sent++;
970 return 0; 1125 return 0;
971} 1126}
972 1127
973static void 1128static void
974lpfc_more_adisc(struct lpfc_hba * phba) 1129lpfc_more_adisc(struct lpfc_vport *vport)
975{ 1130{
976 int sentadisc; 1131 int sentadisc;
1132 struct lpfc_hba *phba = vport->phba;
977 1133
978 if (phba->num_disc_nodes) 1134 if (vport->num_disc_nodes)
979 phba->num_disc_nodes--; 1135 vport->num_disc_nodes--;
980 1136
981 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 1137 /* Continue discovery with <num_disc_nodes> ADISCs to go */
982 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1138 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
983 "%d:0210 Continue discovery with %d ADISCs to go " 1139 "%d (%d):0210 Continue discovery with %d ADISCs to go "
984 "Data: x%x x%x x%x\n", 1140 "Data: x%x x%x x%x\n",
985 phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt, 1141 phba->brd_no, vport->vpi, vport->num_disc_nodes,
986 phba->fc_flag, phba->hba_state); 1142 vport->fc_adisc_cnt, vport->fc_flag, vport->port_state);
987 1143
988 /* Check to see if there are more ADISCs to be sent */ 1144 /* Check to see if there are more ADISCs to be sent */
989 if (phba->fc_flag & FC_NLP_MORE) { 1145 if (vport->fc_flag & FC_NLP_MORE) {
990 lpfc_set_disctmo(phba); 1146 lpfc_set_disctmo(vport);
991 1147 /* go thru NPR nodes and issue any remaining ELS ADISCs */
992 /* go thru NPR list and issue any remaining ELS ADISCs */ 1148 sentadisc = lpfc_els_disc_adisc(vport);
993 sentadisc = lpfc_els_disc_adisc(phba);
994 } 1149 }
995 return; 1150 return;
996} 1151}
997 1152
998static void 1153static void
999lpfc_rscn_disc(struct lpfc_hba * phba) 1154lpfc_rscn_disc(struct lpfc_vport *vport)
1000{ 1155{
1156 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1157
1158 lpfc_can_disctmo(vport);
1159
1001 /* RSCN discovery */ 1160 /* RSCN discovery */
1002 /* go thru NPR list and issue ELS PLOGIs */ 1161 /* go thru NPR nodes and issue ELS PLOGIs */
1003 if (phba->fc_npr_cnt) { 1162 if (vport->fc_npr_cnt)
1004 if (lpfc_els_disc_plogi(phba)) 1163 if (lpfc_els_disc_plogi(vport))
1005 return; 1164 return;
1006 } 1165
1007 if (phba->fc_flag & FC_RSCN_MODE) { 1166 if (vport->fc_flag & FC_RSCN_MODE) {
1008 /* Check to see if more RSCNs came in while we were 1167 /* Check to see if more RSCNs came in while we were
1009 * processing this one. 1168 * processing this one.
1010 */ 1169 */
1011 if ((phba->fc_rscn_id_cnt == 0) && 1170 if ((vport->fc_rscn_id_cnt == 0) &&
1012 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { 1171 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
1013 spin_lock_irq(phba->host->host_lock); 1172 spin_lock_irq(shost->host_lock);
1014 phba->fc_flag &= ~FC_RSCN_MODE; 1173 vport->fc_flag &= ~FC_RSCN_MODE;
1015 spin_unlock_irq(phba->host->host_lock); 1174 spin_unlock_irq(shost->host_lock);
1016 } else { 1175 } else {
1017 lpfc_els_handle_rscn(phba); 1176 lpfc_els_handle_rscn(vport);
1018 } 1177 }
1019 } 1178 }
1020} 1179}
1021 1180
1022static void 1181static void
1023lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 1182lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1024 struct lpfc_iocbq * rspiocb) 1183 struct lpfc_iocbq *rspiocb)
1025{ 1184{
1185 struct lpfc_vport *vport = cmdiocb->vport;
1186 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1026 IOCB_t *irsp; 1187 IOCB_t *irsp;
1027 struct lpfc_sli *psli;
1028 struct lpfc_nodelist *ndlp; 1188 struct lpfc_nodelist *ndlp;
1029 LPFC_MBOXQ_t *mbox; 1189 int disc;
1030 int disc, rc;
1031
1032 psli = &phba->sli;
1033 1190
1034 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1191 /* we pass cmdiocb to state machine which needs rspiocb as well */
1035 cmdiocb->context_un.rsp_iocb = rspiocb; 1192 cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1037,27 +1194,32 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1037 irsp = &(rspiocb->iocb); 1194 irsp = &(rspiocb->iocb);
1038 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1195 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1039 1196
1197 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1198 "ADISC cmpl: status:x%x/x%x did:x%x",
1199 irsp->ulpStatus, irsp->un.ulpWord[4],
1200 ndlp->nlp_DID);
1201
1040 /* Since ndlp can be freed in the disc state machine, note if this node 1202 /* Since ndlp can be freed in the disc state machine, note if this node
1041 * is being used during discovery. 1203 * is being used during discovery.
1042 */ 1204 */
1205 spin_lock_irq(shost->host_lock);
1043 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1206 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1044 spin_lock_irq(phba->host->host_lock);
1045 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 1207 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
1046 spin_unlock_irq(phba->host->host_lock); 1208 spin_unlock_irq(shost->host_lock);
1047 1209
1048 /* ADISC completes to NPort <nlp_DID> */ 1210 /* ADISC completes to NPort <nlp_DID> */
1049 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1211 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1050 "%d:0104 ADISC completes to NPort x%x " 1212 "%d (%d):0104 ADISC completes to NPort x%x "
1051 "Data: x%x x%x x%x x%x x%x\n", 1213 "Data: x%x x%x x%x x%x x%x\n",
1052 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, 1214 phba->brd_no, vport->vpi, ndlp->nlp_DID,
1053 irsp->un.ulpWord[4], irsp->ulpTimeout, disc, 1215 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
1054 phba->num_disc_nodes); 1216 disc, vport->num_disc_nodes);
1055 1217
1056 /* Check to see if link went down during discovery */ 1218 /* Check to see if link went down during discovery */
1057 if (lpfc_els_chk_latt(phba)) { 1219 if (lpfc_els_chk_latt(vport)) {
1058 spin_lock_irq(phba->host->host_lock); 1220 spin_lock_irq(shost->host_lock);
1059 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1221 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1060 spin_unlock_irq(phba->host->host_lock); 1222 spin_unlock_irq(shost->host_lock);
1061 goto out; 1223 goto out;
1062 } 1224 }
1063 1225
@@ -1066,67 +1228,68 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1066 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1228 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1067 /* ELS command is being retried */ 1229 /* ELS command is being retried */
1068 if (disc) { 1230 if (disc) {
1069 spin_lock_irq(phba->host->host_lock); 1231 spin_lock_irq(shost->host_lock);
1070 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1232 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1071 spin_unlock_irq(phba->host->host_lock); 1233 spin_unlock_irq(shost->host_lock);
1072 lpfc_set_disctmo(phba); 1234 lpfc_set_disctmo(vport);
1073 } 1235 }
1074 goto out; 1236 goto out;
1075 } 1237 }
1076 /* ADISC failed */ 1238 /* ADISC failed */
1077 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1239 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1078 if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 1240 if (!lpfc_error_lost_link(irsp)) {
1079 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) && 1241 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1080 (irsp->un.ulpWord[4] != IOERR_LINK_DOWN) && 1242 NLP_EVT_CMPL_ADISC);
1081 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) {
1082 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1083 NLP_EVT_CMPL_ADISC);
1084 } 1243 }
1085 } else { 1244 } else {
1086 /* Good status, call state machine */ 1245 /* Good status, call state machine */
1087 lpfc_disc_state_machine(phba, ndlp, cmdiocb, 1246 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1088 NLP_EVT_CMPL_ADISC); 1247 NLP_EVT_CMPL_ADISC);
1089 } 1248 }
1090 1249
1091 if (disc && phba->num_disc_nodes) { 1250 if (disc && vport->num_disc_nodes) {
1092 /* Check to see if there are more ADISCs to be sent */ 1251 /* Check to see if there are more ADISCs to be sent */
1093 lpfc_more_adisc(phba); 1252 lpfc_more_adisc(vport);
1094 1253
1095 /* Check to see if we are done with ADISC authentication */ 1254 /* Check to see if we are done with ADISC authentication */
1096 if (phba->num_disc_nodes == 0) { 1255 if (vport->num_disc_nodes == 0) {
1097 lpfc_can_disctmo(phba); 1256 /* If we get here, there is nothing left to ADISC */
1098 /* If we get here, there is nothing left to wait for */ 1257 /*
1099 if ((phba->hba_state < LPFC_HBA_READY) && 1258 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1100 (phba->hba_state != LPFC_CLEAR_LA)) { 1259 * and continue discovery.
1101 /* Link up discovery */ 1260 */
1102 if ((mbox = mempool_alloc(phba->mbox_mem_pool, 1261 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1103 GFP_KERNEL))) { 1262 !(vport->fc_flag & FC_RSCN_MODE)) {
1104 phba->hba_state = LPFC_CLEAR_LA; 1263 lpfc_issue_reg_vpi(phba, vport);
1105 lpfc_clear_la(phba, mbox); 1264 goto out;
1106 mbox->mbox_cmpl = 1265 }
1107 lpfc_mbx_cmpl_clear_la; 1266 /*
1108 rc = lpfc_sli_issue_mbox 1267 * For SLI2, we need to set port_state to READY
1109 (phba, mbox, 1268 * and continue discovery.
1110 (MBX_NOWAIT | MBX_STOP_IOCB)); 1269 */
1111 if (rc == MBX_NOT_FINISHED) { 1270 if (vport->port_state < LPFC_VPORT_READY) {
1112 mempool_free(mbox, 1271 /* If we get here, there is nothing to ADISC */
1113 phba->mbox_mem_pool); 1272 if (vport->port_type == LPFC_PHYSICAL_PORT)
1114 lpfc_disc_flush_list(phba); 1273 lpfc_issue_clear_la(phba, vport);
1115 psli->ring[(psli->extra_ring)]. 1274
1116 flag &= 1275 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1117 ~LPFC_STOP_IOCB_EVENT; 1276 vport->num_disc_nodes = 0;
1118 psli->ring[(psli->fcp_ring)]. 1277 /* go thru NPR list, issue ELS PLOGIs */
1119 flag &= 1278 if (vport->fc_npr_cnt)
1120 ~LPFC_STOP_IOCB_EVENT; 1279 lpfc_els_disc_plogi(vport);
1121 psli->ring[(psli->next_ring)]. 1280
1122 flag &= 1281 if (!vport->num_disc_nodes) {
1123 ~LPFC_STOP_IOCB_EVENT; 1282 spin_lock_irq(shost->host_lock);
1124 phba->hba_state = 1283 vport->fc_flag &=
1125 LPFC_HBA_READY; 1284 ~FC_NDISC_ACTIVE;
1285 spin_unlock_irq(
1286 shost->host_lock);
1287 lpfc_can_disctmo(vport);
1126 } 1288 }
1127 } 1289 }
1290 vport->port_state = LPFC_VPORT_READY;
1128 } else { 1291 } else {
1129 lpfc_rscn_disc(phba); 1292 lpfc_rscn_disc(vport);
1130 } 1293 }
1131 } 1294 }
1132 } 1295 }
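
The inline CLEAR_LA mailbox sequence removed above is folded into lpfc_issue_clear_la() for the physical port, while NPIV ports take the lpfc_issue_reg_vpi() path instead. A sketch of what the helper presumably does, reconstructed from the removed block (port-state bookkeeping and the failure-path ring-flag handling are elided):

	/* Sketch reconstructed from the open-coded block this patch removes. */
	static void
	lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
	{
		LPFC_MBOXQ_t *mbox;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			return;

		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
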
@@ -1136,23 +1299,22 @@ out:
1136} 1299}
1137 1300
1138int 1301int
1139lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 1302lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1140 uint8_t retry) 1303 uint8_t retry)
1141{ 1304{
1305 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1306 struct lpfc_hba *phba = vport->phba;
1142 ADISC *ap; 1307 ADISC *ap;
1143 IOCB_t *icmd; 1308 IOCB_t *icmd;
1144 struct lpfc_iocbq *elsiocb; 1309 struct lpfc_iocbq *elsiocb;
1145 struct lpfc_sli_ring *pring; 1310 struct lpfc_sli *psli = &phba->sli;
1146 struct lpfc_sli *psli; 1311 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1147 uint8_t *pcmd; 1312 uint8_t *pcmd;
1148 uint16_t cmdsize; 1313 uint16_t cmdsize;
1149 1314
1150 psli = &phba->sli; 1315 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
1151 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1316 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1152 1317 ndlp->nlp_DID, ELS_CMD_ADISC);
1153 cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
1154 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1155 ndlp->nlp_DID, ELS_CMD_ADISC);
1156 if (!elsiocb) 1318 if (!elsiocb)
1157 return 1; 1319 return 1;
1158 1320
@@ -1161,81 +1323,97 @@ lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1161 1323
1162 /* For ADISC request, remainder of payload is service parameters */ 1324 /* For ADISC request, remainder of payload is service parameters */
1163 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 1325 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1164 pcmd += sizeof (uint32_t); 1326 pcmd += sizeof(uint32_t);
1165 1327
1166 /* Fill in ADISC payload */ 1328 /* Fill in ADISC payload */
1167 ap = (ADISC *) pcmd; 1329 ap = (ADISC *) pcmd;
1168 ap->hardAL_PA = phba->fc_pref_ALPA; 1330 ap->hardAL_PA = phba->fc_pref_ALPA;
1169 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name)); 1331 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
1170 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); 1332 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1171 ap->DID = be32_to_cpu(phba->fc_myDID); 1333 ap->DID = be32_to_cpu(vport->fc_myDID);
1334
1335 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1336 "Issue ADISC: did:x%x",
1337 ndlp->nlp_DID, 0, 0);
1172 1338
1173 phba->fc_stat.elsXmitADISC++; 1339 phba->fc_stat.elsXmitADISC++;
1174 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 1340 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
1175 spin_lock_irq(phba->host->host_lock); 1341 spin_lock_irq(shost->host_lock);
1176 ndlp->nlp_flag |= NLP_ADISC_SND; 1342 ndlp->nlp_flag |= NLP_ADISC_SND;
1343 spin_unlock_irq(shost->host_lock);
1177 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1344 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1345 spin_lock_irq(shost->host_lock);
1178 ndlp->nlp_flag &= ~NLP_ADISC_SND; 1346 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1179 spin_unlock_irq(phba->host->host_lock); 1347 spin_unlock_irq(shost->host_lock);
1180 lpfc_els_free_iocb(phba, elsiocb); 1348 lpfc_els_free_iocb(phba, elsiocb);
1181 return 1; 1349 return 1;
1182 } 1350 }
1183 spin_unlock_irq(phba->host->host_lock);
1184 return 0; 1351 return 0;
1185} 1352}
1186 1353
1187static void 1354static void
1188lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 1355lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1189 struct lpfc_iocbq * rspiocb) 1356 struct lpfc_iocbq *rspiocb)
1190{ 1357{
1358 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1359 struct lpfc_vport *vport = ndlp->vport;
1360 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1191 IOCB_t *irsp; 1361 IOCB_t *irsp;
1192 struct lpfc_sli *psli; 1362 struct lpfc_sli *psli;
1193 struct lpfc_nodelist *ndlp;
1194 1363
1195 psli = &phba->sli; 1364 psli = &phba->sli;
1196 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1365 /* we pass cmdiocb to state machine which needs rspiocb as well */
1197 cmdiocb->context_un.rsp_iocb = rspiocb; 1366 cmdiocb->context_un.rsp_iocb = rspiocb;
1198 1367
1199 irsp = &(rspiocb->iocb); 1368 irsp = &(rspiocb->iocb);
1200 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1369 spin_lock_irq(shost->host_lock);
1201 spin_lock_irq(phba->host->host_lock);
1202 ndlp->nlp_flag &= ~NLP_LOGO_SND; 1370 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1203 spin_unlock_irq(phba->host->host_lock); 1371 spin_unlock_irq(shost->host_lock);
1372
1373 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1374 "LOGO cmpl: status:x%x/x%x did:x%x",
1375 irsp->ulpStatus, irsp->un.ulpWord[4],
1376 ndlp->nlp_DID);
1204 1377
1205 /* LOGO completes to NPort <nlp_DID> */ 1378 /* LOGO completes to NPort <nlp_DID> */
1206 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1379 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1207 "%d:0105 LOGO completes to NPort x%x " 1380 "%d (%d):0105 LOGO completes to NPort x%x "
1208 "Data: x%x x%x x%x x%x\n", 1381 "Data: x%x x%x x%x x%x\n",
1209 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, 1382 phba->brd_no, vport->vpi, ndlp->nlp_DID,
1210 irsp->un.ulpWord[4], irsp->ulpTimeout, 1383 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
1211 phba->num_disc_nodes); 1384 vport->num_disc_nodes);
1212 1385
1213 /* Check to see if link went down during discovery */ 1386 /* Check to see if link went down during discovery */
1214 if (lpfc_els_chk_latt(phba)) 1387 if (lpfc_els_chk_latt(vport))
1215 goto out; 1388 goto out;
1216 1389
1390 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
1391 /* NLP_EVT_DEVICE_RM should unregister the RPI
1392 * which should abort all outstanding IOs.
1393 */
1394 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1395 NLP_EVT_DEVICE_RM);
1396 goto out;
1397 }
1398
1217 if (irsp->ulpStatus) { 1399 if (irsp->ulpStatus) {
1218 /* Check for retry */ 1400 /* Check for retry */
1219 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1401 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1220 /* ELS command is being retried */ 1402 /* ELS command is being retried */
1221 goto out; 1403 goto out;
1222 }
1223 /* LOGO failed */ 1404 /* LOGO failed */
1224 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1405 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1225 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1406 if (lpfc_error_lost_link(irsp))
1226 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1227 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
1228 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
1229 goto out; 1407 goto out;
1230 } else { 1408 else
1231 lpfc_disc_state_machine(phba, ndlp, cmdiocb, 1409 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1232 NLP_EVT_CMPL_LOGO); 1410 NLP_EVT_CMPL_LOGO);
1233 }
1234 } else { 1411 } else {
1235 /* Good status, call state machine. 1412 /* Good status, call state machine.
1236 * This will unregister the rpi if needed. 1413 * This will unregister the rpi if needed.
1237 */ 1414 */
1238 lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 1415 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1416 NLP_EVT_CMPL_LOGO);
1239 } 1417 }
1240 1418
1241out: 1419out:
@@ -1244,75 +1422,91 @@ out:
1244} 1422}
1245 1423
1246int 1424int
1247lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 1425lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1248 uint8_t retry) 1426 uint8_t retry)
1249{ 1427{
1428 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1429 struct lpfc_hba *phba = vport->phba;
1250 IOCB_t *icmd; 1430 IOCB_t *icmd;
1251 struct lpfc_iocbq *elsiocb; 1431 struct lpfc_iocbq *elsiocb;
1252 struct lpfc_sli_ring *pring; 1432 struct lpfc_sli_ring *pring;
1253 struct lpfc_sli *psli; 1433 struct lpfc_sli *psli;
1254 uint8_t *pcmd; 1434 uint8_t *pcmd;
1255 uint16_t cmdsize; 1435 uint16_t cmdsize;
1436 int rc;
1256 1437
1257 psli = &phba->sli; 1438 psli = &phba->sli;
1258 pring = &psli->ring[LPFC_ELS_RING]; 1439 pring = &psli->ring[LPFC_ELS_RING];
1259 1440
1260 cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name); 1441 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
1261 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, 1442 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1262 ndlp->nlp_DID, ELS_CMD_LOGO); 1443 ndlp->nlp_DID, ELS_CMD_LOGO);
1263 if (!elsiocb) 1444 if (!elsiocb)
1264 return 1; 1445 return 1;
1265 1446
1266 icmd = &elsiocb->iocb; 1447 icmd = &elsiocb->iocb;
1267 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1448 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1268 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 1449 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
1269 pcmd += sizeof (uint32_t); 1450 pcmd += sizeof(uint32_t);
1270 1451
1271 /* Fill in LOGO payload */ 1452 /* Fill in LOGO payload */
1272 *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID); 1453 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
1273 pcmd += sizeof (uint32_t); 1454 pcmd += sizeof(uint32_t);
1274 memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name)); 1455 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
1456
1457 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1458 "Issue LOGO: did:x%x",
1459 ndlp->nlp_DID, 0, 0);
1275 1460
1276 phba->fc_stat.elsXmitLOGO++; 1461 phba->fc_stat.elsXmitLOGO++;
1277 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 1462 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
1278 spin_lock_irq(phba->host->host_lock); 1463 spin_lock_irq(shost->host_lock);
1279 ndlp->nlp_flag |= NLP_LOGO_SND; 1464 ndlp->nlp_flag |= NLP_LOGO_SND;
1280 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1465 spin_unlock_irq(shost->host_lock);
1466 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1467
1468 if (rc == IOCB_ERROR) {
1469 spin_lock_irq(shost->host_lock);
1281 ndlp->nlp_flag &= ~NLP_LOGO_SND; 1470 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1282 spin_unlock_irq(phba->host->host_lock); 1471 spin_unlock_irq(shost->host_lock);
1283 lpfc_els_free_iocb(phba, elsiocb); 1472 lpfc_els_free_iocb(phba, elsiocb);
1284 return 1; 1473 return 1;
1285 } 1474 }
1286 spin_unlock_irq(phba->host->host_lock);
1287 return 0; 1475 return 0;
1288} 1476}
1289 1477
1290static void 1478static void
1291lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 1479lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1292 struct lpfc_iocbq * rspiocb) 1480 struct lpfc_iocbq *rspiocb)
1293{ 1481{
1482 struct lpfc_vport *vport = cmdiocb->vport;
1294 IOCB_t *irsp; 1483 IOCB_t *irsp;
1295 1484
1296 irsp = &rspiocb->iocb; 1485 irsp = &rspiocb->iocb;
1297 1486
1487 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1488 "ELS cmd cmpl: status:x%x/x%x did:x%x",
1489 irsp->ulpStatus, irsp->un.ulpWord[4],
1490 irsp->un.elsreq64.remoteID);
1491
1298 /* ELS cmd tag <ulpIoTag> completes */ 1492 /* ELS cmd tag <ulpIoTag> completes */
1299 lpfc_printf_log(phba, 1493 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1300 KERN_INFO, 1494 "%d (%d):0106 ELS cmd tag x%x completes Data: x%x x%x "
1301 LOG_ELS, 1495 "x%x\n",
1302 "%d:0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 1496 phba->brd_no, vport->vpi,
1303 phba->brd_no,
1304 irsp->ulpIoTag, irsp->ulpStatus, 1497 irsp->ulpIoTag, irsp->ulpStatus,
1305 irsp->un.ulpWord[4], irsp->ulpTimeout); 1498 irsp->un.ulpWord[4], irsp->ulpTimeout);
1306 1499
1307 /* Check to see if link went down during discovery */ 1500 /* Check to see if link went down during discovery */
1308 lpfc_els_chk_latt(phba); 1501 lpfc_els_chk_latt(vport);
1309 lpfc_els_free_iocb(phba, cmdiocb); 1502 lpfc_els_free_iocb(phba, cmdiocb);
1310 return; 1503 return;
1311} 1504}
1312 1505
1313int 1506int
1314lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry) 1507lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1315{ 1508{
1509 struct lpfc_hba *phba = vport->phba;
1316 IOCB_t *icmd; 1510 IOCB_t *icmd;
1317 struct lpfc_iocbq *elsiocb; 1511 struct lpfc_iocbq *elsiocb;
1318 struct lpfc_sli_ring *pring; 1512 struct lpfc_sli_ring *pring;
@@ -1323,15 +1517,16 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1323 1517
1324 psli = &phba->sli; 1518 psli = &phba->sli;
1325 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1519 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1326 cmdsize = (sizeof (uint32_t) + sizeof (SCR)); 1520 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
1327 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1521 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1328 if (!ndlp) 1522 if (!ndlp)
1329 return 1; 1523 return 1;
1330 1524
1331 lpfc_nlp_init(phba, ndlp, nportid); 1525 lpfc_nlp_init(vport, ndlp, nportid);
1526
1527 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1528 ndlp->nlp_DID, ELS_CMD_SCR);
1332 1529
1333 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1334 ndlp->nlp_DID, ELS_CMD_SCR);
1335 if (!elsiocb) { 1530 if (!elsiocb) {
1336 lpfc_nlp_put(ndlp); 1531 lpfc_nlp_put(ndlp);
1337 return 1; 1532 return 1;
@@ -1341,29 +1536,31 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1341 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1536 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1342 1537
1343 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 1538 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
1344 pcmd += sizeof (uint32_t); 1539 pcmd += sizeof(uint32_t);
1345 1540
1346 /* For SCR, remainder of payload is SCR parameter page */ 1541 /* For SCR, remainder of payload is SCR parameter page */
1347 memset(pcmd, 0, sizeof (SCR)); 1542 memset(pcmd, 0, sizeof(SCR));
1348 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 1543 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
1349 1544
1545 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1546 "Issue SCR: did:x%x",
1547 ndlp->nlp_DID, 0, 0);
1548
1350 phba->fc_stat.elsXmitSCR++; 1549 phba->fc_stat.elsXmitSCR++;
1351 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 1550 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1352 spin_lock_irq(phba->host->host_lock);
1353 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1551 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1354 spin_unlock_irq(phba->host->host_lock);
1355 lpfc_nlp_put(ndlp); 1552 lpfc_nlp_put(ndlp);
1356 lpfc_els_free_iocb(phba, elsiocb); 1553 lpfc_els_free_iocb(phba, elsiocb);
1357 return 1; 1554 return 1;
1358 } 1555 }
1359 spin_unlock_irq(phba->host->host_lock);
1360 lpfc_nlp_put(ndlp); 1556 lpfc_nlp_put(ndlp);
1361 return 0; 1557 return 0;
1362} 1558}
1363 1559
1364static int 1560static int
1365lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry) 1561lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1366{ 1562{
1563 struct lpfc_hba *phba = vport->phba;
1367 IOCB_t *icmd; 1564 IOCB_t *icmd;
1368 struct lpfc_iocbq *elsiocb; 1565 struct lpfc_iocbq *elsiocb;
1369 struct lpfc_sli_ring *pring; 1566 struct lpfc_sli_ring *pring;
@@ -1377,14 +1574,15 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1377 1574
1378 psli = &phba->sli; 1575 psli = &phba->sli;
1379 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1576 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1380 cmdsize = (sizeof (uint32_t) + sizeof (FARP)); 1577 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
1381 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1578 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1382 if (!ndlp) 1579 if (!ndlp)
1383 return 1; 1580 return 1;
1384 lpfc_nlp_init(phba, ndlp, nportid);
1385 1581
1386 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, 1582 lpfc_nlp_init(vport, ndlp, nportid);
1387 ndlp->nlp_DID, ELS_CMD_RNID); 1583
1584 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1585 ndlp->nlp_DID, ELS_CMD_RNID);
1388 if (!elsiocb) { 1586 if (!elsiocb) {
1389 lpfc_nlp_put(ndlp); 1587 lpfc_nlp_put(ndlp);
1390 return 1; 1588 return 1;
@@ -1394,44 +1592,71 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1394 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1592 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1395 1593
1396 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 1594 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
1397 pcmd += sizeof (uint32_t); 1595 pcmd += sizeof(uint32_t);
1398 1596
1399 /* Fill in FARPR payload */ 1597 /* Fill in FARPR payload */
1400 fp = (FARP *) (pcmd); 1598 fp = (FARP *) (pcmd);
1401 memset(fp, 0, sizeof (FARP)); 1599 memset(fp, 0, sizeof(FARP));
1402 lp = (uint32_t *) pcmd; 1600 lp = (uint32_t *) pcmd;
1403 *lp++ = be32_to_cpu(nportid); 1601 *lp++ = be32_to_cpu(nportid);
1404 *lp++ = be32_to_cpu(phba->fc_myDID); 1602 *lp++ = be32_to_cpu(vport->fc_myDID);
1405 fp->Rflags = 0; 1603 fp->Rflags = 0;
1406 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 1604 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
1407 1605
1408 memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name)); 1606 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
1409 memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); 1607 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1410 if ((ondlp = lpfc_findnode_did(phba, nportid))) { 1608 ondlp = lpfc_findnode_did(vport, nportid);
1609 if (ondlp) {
1411 memcpy(&fp->OportName, &ondlp->nlp_portname, 1610 memcpy(&fp->OportName, &ondlp->nlp_portname,
1412 sizeof (struct lpfc_name)); 1611 sizeof(struct lpfc_name));
1413 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 1612 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
1414 sizeof (struct lpfc_name)); 1613 sizeof(struct lpfc_name));
1415 } 1614 }
1416 1615
1616 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1617 "Issue FARPR: did:x%x",
1618 ndlp->nlp_DID, 0, 0);
1619
1417 phba->fc_stat.elsXmitFARPR++; 1620 phba->fc_stat.elsXmitFARPR++;
1418 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 1621 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1419 spin_lock_irq(phba->host->host_lock);
1420 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1622 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1421 spin_unlock_irq(phba->host->host_lock);
1422 lpfc_nlp_put(ndlp); 1623 lpfc_nlp_put(ndlp);
1423 lpfc_els_free_iocb(phba, elsiocb); 1624 lpfc_els_free_iocb(phba, elsiocb);
1424 return 1; 1625 return 1;
1425 } 1626 }
1426 spin_unlock_irq(phba->host->host_lock);
1427 lpfc_nlp_put(ndlp); 1627 lpfc_nlp_put(ndlp);
1428 return 0; 1628 return 0;
1429} 1629}
1430 1630
1631static void
1632lpfc_end_rscn(struct lpfc_vport *vport)
1633{
1634 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1635
1636 if (vport->fc_flag & FC_RSCN_MODE) {
1637 /*
1638 * Check to see if more RSCNs came in while we were
1639 * processing this one.
1640 */
1641 if (vport->fc_rscn_id_cnt ||
1642 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1643 lpfc_els_handle_rscn(vport);
1644 else {
1645 spin_lock_irq(shost->host_lock);
1646 vport->fc_flag &= ~FC_RSCN_MODE;
1647 spin_unlock_irq(shost->host_lock);
1648 }
1649 }
1650}
1651
1431void 1652void
1432lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp) 1653lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1433{ 1654{
1655 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1656
1657 spin_lock_irq(shost->host_lock);
1434 nlp->nlp_flag &= ~NLP_DELAY_TMO; 1658 nlp->nlp_flag &= ~NLP_DELAY_TMO;
1659 spin_unlock_irq(shost->host_lock);
1435 del_timer_sync(&nlp->nlp_delayfunc); 1660 del_timer_sync(&nlp->nlp_delayfunc);
1436 nlp->nlp_last_elscmd = 0; 1661 nlp->nlp_last_elscmd = 0;
1437 1662
@@ -1439,30 +1664,21 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
1439 list_del_init(&nlp->els_retry_evt.evt_listp); 1664 list_del_init(&nlp->els_retry_evt.evt_listp);
1440 1665
1441 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 1666 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
1667 spin_lock_irq(shost->host_lock);
1442 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1668 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1443 if (phba->num_disc_nodes) { 1669 spin_unlock_irq(shost->host_lock);
1670 if (vport->num_disc_nodes) {
1444 /* Check to see if there are more 1671 /* Check to see if there are more
1445 * PLOGIs to be sent 1672 * PLOGIs to be sent
1446 */ 1673 */
1447 lpfc_more_plogi(phba); 1674 lpfc_more_plogi(vport);
1448 1675
1449 if (phba->num_disc_nodes == 0) { 1676 if (vport->num_disc_nodes == 0) {
1450 phba->fc_flag &= ~FC_NDISC_ACTIVE; 1677 spin_lock_irq(shost->host_lock);
1451 lpfc_can_disctmo(phba); 1678 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1452 if (phba->fc_flag & FC_RSCN_MODE) { 1679 spin_unlock_irq(shost->host_lock);
1453 /* 1680 lpfc_can_disctmo(vport);
1454 * Check to see if more RSCNs 1681 lpfc_end_rscn(vport);
1455 * came in while we were
1456 * processing this one.
1457 */
1458 if((phba->fc_rscn_id_cnt==0) &&
1459 !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
1460 phba->fc_flag &= ~FC_RSCN_MODE;
1461 }
1462 else {
1463 lpfc_els_handle_rscn(phba);
1464 }
1465 }
1466 } 1682 }
1467 } 1683 }
1468 } 1684 }
@@ -1472,18 +1688,19 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
1472void 1688void
1473lpfc_els_retry_delay(unsigned long ptr) 1689lpfc_els_retry_delay(unsigned long ptr)
1474{ 1690{
1475 struct lpfc_nodelist *ndlp; 1691 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
1476 struct lpfc_hba *phba; 1692 struct lpfc_vport *vport = ndlp->vport;
1477 unsigned long iflag; 1693 struct lpfc_hba *phba = vport->phba;
1478 struct lpfc_work_evt *evtp; 1694 unsigned long flags;
1479 1695 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
1480 ndlp = (struct lpfc_nodelist *)ptr; 1696
1481 phba = ndlp->nlp_phba; 1697 ndlp = (struct lpfc_nodelist *) ptr;
1698 phba = ndlp->vport->phba;
1482 evtp = &ndlp->els_retry_evt; 1699 evtp = &ndlp->els_retry_evt;
1483 1700
1484 spin_lock_irqsave(phba->host->host_lock, iflag); 1701 spin_lock_irqsave(&phba->hbalock, flags);
1485 if (!list_empty(&evtp->evt_listp)) { 1702 if (!list_empty(&evtp->evt_listp)) {
1486 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1703 spin_unlock_irqrestore(&phba->hbalock, flags);
1487 return; 1704 return;
1488 } 1705 }
1489 1706
@@ -1491,33 +1708,31 @@ lpfc_els_retry_delay(unsigned long ptr)
1491 evtp->evt = LPFC_EVT_ELS_RETRY; 1708 evtp->evt = LPFC_EVT_ELS_RETRY;
1492 list_add_tail(&evtp->evt_listp, &phba->work_list); 1709 list_add_tail(&evtp->evt_listp, &phba->work_list);
1493 if (phba->work_wait) 1710 if (phba->work_wait)
1494 wake_up(phba->work_wait); 1711 lpfc_worker_wake_up(phba);
1495 1712
1496 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1713 spin_unlock_irqrestore(&phba->hbalock, flags);
1497 return; 1714 return;
1498} 1715}
1499 1716
1500void 1717void
1501lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 1718lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1502{ 1719{
1503 struct lpfc_hba *phba; 1720 struct lpfc_vport *vport = ndlp->vport;
1504 uint32_t cmd; 1721 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1505 uint32_t did; 1722 uint32_t cmd, did, retry;
1506 uint8_t retry;
1507 1723
1508 phba = ndlp->nlp_phba; 1724 spin_lock_irq(shost->host_lock);
1509 spin_lock_irq(phba->host->host_lock);
1510 did = ndlp->nlp_DID; 1725 did = ndlp->nlp_DID;
1511 cmd = ndlp->nlp_last_elscmd; 1726 cmd = ndlp->nlp_last_elscmd;
1512 ndlp->nlp_last_elscmd = 0; 1727 ndlp->nlp_last_elscmd = 0;
1513 1728
1514 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1729 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1515 spin_unlock_irq(phba->host->host_lock); 1730 spin_unlock_irq(shost->host_lock);
1516 return; 1731 return;
1517 } 1732 }
1518 1733
1519 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 1734 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1520 spin_unlock_irq(phba->host->host_lock); 1735 spin_unlock_irq(shost->host_lock);
1521 /* 1736 /*
1522 * If a discovery event readded nlp_delayfunc after timer 1737 * If a discovery event readded nlp_delayfunc after timer
1523 * firing and before processing the timer, cancel the 1738 * firing and before processing the timer, cancel the
@@ -1528,57 +1743,54 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1528 1743
1529 switch (cmd) { 1744 switch (cmd) {
1530 case ELS_CMD_FLOGI: 1745 case ELS_CMD_FLOGI:
1531 lpfc_issue_els_flogi(phba, ndlp, retry); 1746 lpfc_issue_els_flogi(vport, ndlp, retry);
1532 break; 1747 break;
1533 case ELS_CMD_PLOGI: 1748 case ELS_CMD_PLOGI:
1534 if(!lpfc_issue_els_plogi(phba, ndlp->nlp_DID, retry)) { 1749 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
1535 ndlp->nlp_prev_state = ndlp->nlp_state; 1750 ndlp->nlp_prev_state = ndlp->nlp_state;
1536 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 1751 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1537 } 1752 }
1538 break; 1753 break;
1539 case ELS_CMD_ADISC: 1754 case ELS_CMD_ADISC:
1540 if (!lpfc_issue_els_adisc(phba, ndlp, retry)) { 1755 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
1541 ndlp->nlp_prev_state = ndlp->nlp_state; 1756 ndlp->nlp_prev_state = ndlp->nlp_state;
1542 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); 1757 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1543 } 1758 }
1544 break; 1759 break;
1545 case ELS_CMD_PRLI: 1760 case ELS_CMD_PRLI:
1546 if (!lpfc_issue_els_prli(phba, ndlp, retry)) { 1761 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
1547 ndlp->nlp_prev_state = ndlp->nlp_state; 1762 ndlp->nlp_prev_state = ndlp->nlp_state;
1548 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE); 1763 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1549 } 1764 }
1550 break; 1765 break;
1551 case ELS_CMD_LOGO: 1766 case ELS_CMD_LOGO:
1552 if (!lpfc_issue_els_logo(phba, ndlp, retry)) { 1767 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
1553 ndlp->nlp_prev_state = ndlp->nlp_state; 1768 ndlp->nlp_prev_state = ndlp->nlp_state;
1554 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1769 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1555 } 1770 }
1556 break; 1771 break;
1772 case ELS_CMD_FDISC:
1773 lpfc_issue_els_fdisc(vport, ndlp, retry);
1774 break;
1557 } 1775 }
1558 return; 1776 return;
1559} 1777}
1560 1778
1561static int 1779static int
1562lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 1780lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1563 struct lpfc_iocbq * rspiocb) 1781 struct lpfc_iocbq *rspiocb)
1564{ 1782{
1565 IOCB_t *irsp; 1783 struct lpfc_vport *vport = cmdiocb->vport;
1566 struct lpfc_dmabuf *pcmd; 1784 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1567 struct lpfc_nodelist *ndlp; 1785 IOCB_t *irsp = &rspiocb->iocb;
1786 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1787 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1568 uint32_t *elscmd; 1788 uint32_t *elscmd;
1569 struct ls_rjt stat; 1789 struct ls_rjt stat;
1570 int retry, maxretry; 1790 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
1571 int delay; 1791 uint32_t cmd = 0;
1572 uint32_t cmd;
1573 uint32_t did; 1792 uint32_t did;
1574 1793
1575 retry = 0;
1576 delay = 0;
1577 maxretry = lpfc_max_els_tries;
1578 irsp = &rspiocb->iocb;
1579 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1580 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1581 cmd = 0;
1582 1794
1583 /* Note: context2 may be 0 for internal driver abort 1795 /* Note: context2 may be 0 for internal driver abort
1584 * of delays ELS command. 1796 * of delays ELS command.
@@ -1594,11 +1806,15 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1594 else { 1806 else {
1595 /* We should only hit this case for retrying PLOGI */ 1807 /* We should only hit this case for retrying PLOGI */
1596 did = irsp->un.elsreq64.remoteID; 1808 did = irsp->un.elsreq64.remoteID;
1597 ndlp = lpfc_findnode_did(phba, did); 1809 ndlp = lpfc_findnode_did(vport, did);
1598 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 1810 if (!ndlp && (cmd != ELS_CMD_PLOGI))
1599 return 1; 1811 return 1;
1600 } 1812 }
1601 1813
1814 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1815 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
1816 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
1817
1602 switch (irsp->ulpStatus) { 1818 switch (irsp->ulpStatus) {
1603 case IOSTAT_FCP_RSP_ERROR: 1819 case IOSTAT_FCP_RSP_ERROR:
1604 case IOSTAT_REMOTE_STOP: 1820 case IOSTAT_REMOTE_STOP:
@@ -1607,25 +1823,37 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1607 case IOSTAT_LOCAL_REJECT: 1823 case IOSTAT_LOCAL_REJECT:
1608 switch ((irsp->un.ulpWord[4] & 0xff)) { 1824 switch ((irsp->un.ulpWord[4] & 0xff)) {
1609 case IOERR_LOOP_OPEN_FAILURE: 1825 case IOERR_LOOP_OPEN_FAILURE:
1610 if (cmd == ELS_CMD_PLOGI) { 1826 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
1611 if (cmdiocb->retry == 0) { 1827 delay = 1000;
1612 delay = 1;
1613 }
1614 }
1615 retry = 1; 1828 retry = 1;
1616 break; 1829 break;
1617 1830
1618 case IOERR_SEQUENCE_TIMEOUT: 1831 case IOERR_ILLEGAL_COMMAND:
1619 retry = 1; 1832 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) &&
1833 (cmd == ELS_CMD_FDISC)) {
1834 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1835 "%d (%d):0124 FDISC failed (3/6) retrying...\n",
1836 phba->brd_no, vport->vpi);
1837 lpfc_mbx_unreg_vpi(vport);
1838 retry = 1;
1839 /* Always retry for this case */
1840 cmdiocb->retry = 0;
1841 }
1620 break; 1842 break;
1621 1843
1622 case IOERR_NO_RESOURCES: 1844 case IOERR_NO_RESOURCES:
1623 if (cmd == ELS_CMD_PLOGI) { 1845 retry = 1;
1624 delay = 1; 1846 if (cmdiocb->retry > 100)
1625 } 1847 delay = 100;
1848 maxretry = 250;
1849 break;
1850
1851 case IOERR_ILLEGAL_FRAME:
1852 delay = 100;
1626 retry = 1; 1853 retry = 1;
1627 break; 1854 break;
1628 1855
1856 case IOERR_SEQUENCE_TIMEOUT:
1629 case IOERR_INVALID_RPI: 1857 case IOERR_INVALID_RPI:
1630 retry = 1; 1858 retry = 1;
1631 break; 1859 break;
@@ -1655,27 +1883,57 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1655 if (stat.un.b.lsRjtRsnCodeExp == 1883 if (stat.un.b.lsRjtRsnCodeExp ==
1656 LSEXP_CMD_IN_PROGRESS) { 1884 LSEXP_CMD_IN_PROGRESS) {
1657 if (cmd == ELS_CMD_PLOGI) { 1885 if (cmd == ELS_CMD_PLOGI) {
1658 delay = 1; 1886 delay = 1000;
1659 maxretry = 48; 1887 maxretry = 48;
1660 } 1888 }
1661 retry = 1; 1889 retry = 1;
1662 break; 1890 break;
1663 } 1891 }
1664 if (cmd == ELS_CMD_PLOGI) { 1892 if (cmd == ELS_CMD_PLOGI) {
1665 delay = 1; 1893 delay = 1000;
1666 maxretry = lpfc_max_els_tries + 1; 1894 maxretry = lpfc_max_els_tries + 1;
1667 retry = 1; 1895 retry = 1;
1668 break; 1896 break;
1669 } 1897 }
1898 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1899 (cmd == ELS_CMD_FDISC) &&
1900 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
1901 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1902 "%d (%d):0125 FDISC Failed (x%x)."
1903 " Fabric out of resources\n",
1904 phba->brd_no, vport->vpi, stat.un.lsRjtError);
1905 lpfc_vport_set_state(vport,
1906 FC_VPORT_NO_FABRIC_RSCS);
1907 }
1670 break; 1908 break;
1671 1909
1672 case LSRJT_LOGICAL_BSY: 1910 case LSRJT_LOGICAL_BSY:
1673 if (cmd == ELS_CMD_PLOGI) { 1911 if ((cmd == ELS_CMD_PLOGI) ||
1674 delay = 1; 1912 (cmd == ELS_CMD_PRLI)) {
1913 delay = 1000;
1675 maxretry = 48; 1914 maxretry = 48;
1915 } else if (cmd == ELS_CMD_FDISC) {
1916 /* Always retry for this case */
1917 cmdiocb->retry = 0;
1676 } 1918 }
1677 retry = 1; 1919 retry = 1;
1678 break; 1920 break;
1921
1922 case LSRJT_LOGICAL_ERR:
1923 case LSRJT_PROTOCOL_ERR:
1924 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1925 (cmd == ELS_CMD_FDISC) &&
1926 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
1927 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
1928 ) {
1929 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1930 "%d (%d):0123 FDISC Failed (x%x)."
1931 " Fabric Detected Bad WWN\n",
1932 phba->brd_no, vport->vpi, stat.un.lsRjtError);
1933 lpfc_vport_set_state(vport,
1934 FC_VPORT_FABRIC_REJ_WWN);
1935 }
1936 break;
1679 } 1937 }
1680 break; 1938 break;
1681 1939
@@ -1695,21 +1953,27 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1695 retry = 0; 1953 retry = 0;
1696 } 1954 }
1697 1955
1956 if ((vport->load_flag & FC_UNLOADING) != 0)
1957 retry = 0;
1958
1698 if (retry) { 1959 if (retry) {
1699 1960
1700 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 1961 /* Retry ELS command <elsCmd> to remote NPORT <did> */
1701 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1962 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1702 "%d:0107 Retry ELS command x%x to remote " 1963 "%d (%d):0107 Retry ELS command x%x to remote "
1703 "NPORT x%x Data: x%x x%x\n", 1964 "NPORT x%x Data: x%x x%x\n",
1704 phba->brd_no, 1965 phba->brd_no, vport->vpi,
1705 cmd, did, cmdiocb->retry, delay); 1966 cmd, did, cmdiocb->retry, delay);
1706 1967
1707 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) { 1968 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
1969 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1970 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
1971 /* Don't reset timer for no resources */
1972
1708 /* If discovery / RSCN timer is running, reset it */ 1973 /* If discovery / RSCN timer is running, reset it */
1709 if (timer_pending(&phba->fc_disctmo) || 1974 if (timer_pending(&vport->fc_disctmo) ||
1710 (phba->fc_flag & FC_RSCN_MODE)) { 1975 (vport->fc_flag & FC_RSCN_MODE))
1711 lpfc_set_disctmo(phba); 1976 lpfc_set_disctmo(vport);
1712 }
1713 } 1977 }
1714 1978
1715 phba->fc_stat.elsXmitRetry++; 1979 phba->fc_stat.elsXmitRetry++;
@@ -1717,50 +1981,62 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1717 phba->fc_stat.elsDelayRetry++; 1981 phba->fc_stat.elsDelayRetry++;
1718 ndlp->nlp_retry = cmdiocb->retry; 1982 ndlp->nlp_retry = cmdiocb->retry;
1719 1983
1720 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 1984 /* delay is specified in milliseconds */
1985 mod_timer(&ndlp->nlp_delayfunc,
1986 jiffies + msecs_to_jiffies(delay));
1987 spin_lock_irq(shost->host_lock);
1721 ndlp->nlp_flag |= NLP_DELAY_TMO; 1988 ndlp->nlp_flag |= NLP_DELAY_TMO;
1989 spin_unlock_irq(shost->host_lock);
1722 1990
1723 ndlp->nlp_prev_state = ndlp->nlp_state; 1991 ndlp->nlp_prev_state = ndlp->nlp_state;
1724 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1992 if (cmd == ELS_CMD_PRLI)
1993 lpfc_nlp_set_state(vport, ndlp,
1994 NLP_STE_REG_LOGIN_ISSUE);
1995 else
1996 lpfc_nlp_set_state(vport, ndlp,
1997 NLP_STE_NPR_NODE);
1725 ndlp->nlp_last_elscmd = cmd; 1998 ndlp->nlp_last_elscmd = cmd;
1726 1999
1727 return 1; 2000 return 1;
1728 } 2001 }
1729 switch (cmd) { 2002 switch (cmd) {
1730 case ELS_CMD_FLOGI: 2003 case ELS_CMD_FLOGI:
1731 lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry); 2004 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
2005 return 1;
2006 case ELS_CMD_FDISC:
2007 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
1732 return 1; 2008 return 1;
1733 case ELS_CMD_PLOGI: 2009 case ELS_CMD_PLOGI:
1734 if (ndlp) { 2010 if (ndlp) {
1735 ndlp->nlp_prev_state = ndlp->nlp_state; 2011 ndlp->nlp_prev_state = ndlp->nlp_state;
1736 lpfc_nlp_set_state(phba, ndlp, 2012 lpfc_nlp_set_state(vport, ndlp,
1737 NLP_STE_PLOGI_ISSUE); 2013 NLP_STE_PLOGI_ISSUE);
1738 } 2014 }
1739 lpfc_issue_els_plogi(phba, did, cmdiocb->retry); 2015 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
1740 return 1; 2016 return 1;
1741 case ELS_CMD_ADISC: 2017 case ELS_CMD_ADISC:
1742 ndlp->nlp_prev_state = ndlp->nlp_state; 2018 ndlp->nlp_prev_state = ndlp->nlp_state;
1743 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); 2019 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1744 lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry); 2020 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
1745 return 1; 2021 return 1;
1746 case ELS_CMD_PRLI: 2022 case ELS_CMD_PRLI:
1747 ndlp->nlp_prev_state = ndlp->nlp_state; 2023 ndlp->nlp_prev_state = ndlp->nlp_state;
1748 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE); 2024 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1749 lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry); 2025 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
1750 return 1; 2026 return 1;
1751 case ELS_CMD_LOGO: 2027 case ELS_CMD_LOGO:
1752 ndlp->nlp_prev_state = ndlp->nlp_state; 2028 ndlp->nlp_prev_state = ndlp->nlp_state;
1753 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 2029 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1754 lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry); 2030 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
1755 return 1; 2031 return 1;
1756 } 2032 }
1757 } 2033 }
1758 2034
1759 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 2035 /* No retry ELS command <elsCmd> to remote NPORT <did> */
1760 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2036 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1761 "%d:0108 No retry ELS command x%x to remote NPORT x%x " 2037 "%d (%d):0108 No retry ELS command x%x to remote "
1762 "Data: x%x\n", 2038 "NPORT x%x Data: x%x\n",
1763 phba->brd_no, 2039 phba->brd_no, vport->vpi,
1764 cmd, did, cmdiocb->retry); 2040 cmd, did, cmdiocb->retry);
1765 2041
1766 return 0; 2042 return 0;
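Within lpfc_els_retry() the delay values are now expressed in milliseconds (100 or 1000) and converted with msecs_to_jiffies() when the per-node timer is armed, replacing the old fixed one-second jiffies + HZ. A small sketch of that conversion under an assumed tick rate:

	#include <stdio.h>

	#define HZ 250	/* assumed tick rate for the example only */

	/* Approximation of msecs_to_jiffies(): round up so that a
	 * non-zero delay never collapses to zero ticks. */
	static unsigned long ms_to_ticks(unsigned int ms)
	{
		return ((unsigned long)ms * HZ + 999) / 1000;
	}

	int main(void)
	{
		const unsigned int delays[] = { 100, 1000 };	/* values used above */

		for (unsigned int i = 0; i < 2; i++)
			printf("%4u ms -> %lu ticks at HZ=%d\n",
			       delays[i], ms_to_ticks(delays[i]), HZ);
		return 0;
	}

Specifying the delay in milliseconds lets the no-resources and illegal-frame paths back off for only 100 ms instead of a full second.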
@@ -1795,33 +2071,36 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
1795 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2071 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1796 kfree(buf_ptr); 2072 kfree(buf_ptr);
1797 } 2073 }
1798 spin_lock_irq(phba->host->host_lock);
1799 lpfc_sli_release_iocbq(phba, elsiocb); 2074 lpfc_sli_release_iocbq(phba, elsiocb);
1800 spin_unlock_irq(phba->host->host_lock);
1801 return 0; 2075 return 0;
1802} 2076}
1803 2077
1804static void 2078static void
1805lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 2079lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1806 struct lpfc_iocbq * rspiocb) 2080 struct lpfc_iocbq *rspiocb)
1807{ 2081{
1808 struct lpfc_nodelist *ndlp; 2082 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2083 struct lpfc_vport *vport = cmdiocb->vport;
2084 IOCB_t *irsp;
1809 2085
1810 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2086 irsp = &rspiocb->iocb;
2087 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2088 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
2089 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
1811 2090
1812 /* ACC to LOGO completes to NPort <nlp_DID> */ 2091 /* ACC to LOGO completes to NPort <nlp_DID> */
1813 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2092 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1814 "%d:0109 ACC to LOGO completes to NPort x%x " 2093 "%d (%d):0109 ACC to LOGO completes to NPort x%x "
1815 "Data: x%x x%x x%x\n", 2094 "Data: x%x x%x x%x\n",
1816 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 2095 phba->brd_no, vport->vpi, ndlp->nlp_DID,
1817 ndlp->nlp_state, ndlp->nlp_rpi); 2096 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1818 2097
1819 switch (ndlp->nlp_state) { 2098 switch (ndlp->nlp_state) {
1820 case NLP_STE_UNUSED_NODE: /* node is just allocated */ 2099 case NLP_STE_UNUSED_NODE: /* node is just allocated */
1821 lpfc_drop_node(phba, ndlp); 2100 lpfc_drop_node(vport, ndlp);
1822 break; 2101 break;
1823 case NLP_STE_NPR_NODE: /* NPort Recovery mode */ 2102 case NLP_STE_NPR_NODE: /* NPort Recovery mode */
1824 lpfc_unreg_rpi(phba, ndlp); 2103 lpfc_unreg_rpi(vport, ndlp);
1825 break; 2104 break;
1826 default: 2105 default:
1827 break; 2106 break;
@@ -1830,24 +2109,38 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1830 return; 2109 return;
1831} 2110}
1832 2111
2112void
2113lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2114{
2115 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2116 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2117
2118 pmb->context1 = NULL;
2119 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2120 kfree(mp);
2121 mempool_free(pmb, phba->mbox_mem_pool);
2122 lpfc_nlp_put(ndlp);
2123 return;
2124}
2125
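The new lpfc_mbx_cmpl_dflt_rpi() completion only releases the DMA buffer attached to the mailbox and drops the node reference taken when the command was queued. A minimal sketch of that put-on-completion pattern, using a hypothetical refcounted node type in place of struct lpfc_nodelist:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical refcounted node, standing in for struct lpfc_nodelist. */
	struct node {
		int refcnt;
	};

	static struct node *node_get(struct node *n) { n->refcnt++; return n; }

	static void node_put(struct node *n)
	{
		if (--n->refcnt == 0) {
			printf("node freed\n");
			free(n);
		}
	}

	/* Completion handler: free per-command resources, then drop the
	 * reference the submitter took on our behalf. */
	static void mbox_complete(struct node *n, void *dma_buf)
	{
		free(dma_buf);		/* stands in for lpfc_mbuf_free() */
		node_put(n);
	}

	int main(void)
	{
		struct node *n = calloc(1, sizeof(*n));

		n->refcnt = 1;				/* discovery list reference */
		mbox_complete(node_get(n), malloc(64));	/* submit + complete */
		node_put(n);				/* list reference released later */
		return 0;
	}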
1833static void 2126static void
1834lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2127lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1835 struct lpfc_iocbq *rspiocb) 2128 struct lpfc_iocbq *rspiocb)
1836{ 2129{
2130 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2131 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
2132 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
1837 IOCB_t *irsp; 2133 IOCB_t *irsp;
1838 struct lpfc_nodelist *ndlp;
1839 LPFC_MBOXQ_t *mbox = NULL; 2134 LPFC_MBOXQ_t *mbox = NULL;
1840 struct lpfc_dmabuf *mp; 2135 struct lpfc_dmabuf *mp = NULL;
1841 2136
1842 irsp = &rspiocb->iocb; 2137 irsp = &rspiocb->iocb;
1843 2138
1844 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1845 if (cmdiocb->context_un.mbox) 2139 if (cmdiocb->context_un.mbox)
1846 mbox = cmdiocb->context_un.mbox; 2140 mbox = cmdiocb->context_un.mbox;
1847 2141
1848
1849 /* Check to see if link went down during discovery */ 2142 /* Check to see if link went down during discovery */
1850 if (lpfc_els_chk_latt(phba) || !ndlp) { 2143 if (!ndlp || lpfc_els_chk_latt(vport)) {
1851 if (mbox) { 2144 if (mbox) {
1852 mp = (struct lpfc_dmabuf *) mbox->context1; 2145 mp = (struct lpfc_dmabuf *) mbox->context1;
1853 if (mp) { 2146 if (mp) {
@@ -1859,24 +2152,37 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1859 goto out; 2152 goto out;
1860 } 2153 }
1861 2154
2155 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2156 "ACC cmpl: status:x%x/x%x did:x%x",
2157 irsp->ulpStatus, irsp->un.ulpWord[4],
2158 irsp->un.rcvels.remoteID);
2159
1862 /* ELS response tag <ulpIoTag> completes */ 2160 /* ELS response tag <ulpIoTag> completes */
1863 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2161 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1864 "%d:0110 ELS response tag x%x completes " 2162 "%d (%d):0110 ELS response tag x%x completes "
1865 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 2163 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
1866 phba->brd_no, 2164 phba->brd_no, vport->vpi,
1867 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 2165 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
1868 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 2166 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
1869 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 2167 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
1870 ndlp->nlp_rpi); 2168 ndlp->nlp_rpi);
1871 2169
1872 if (mbox) { 2170 if (mbox) {
1873 if ((rspiocb->iocb.ulpStatus == 0) 2171 if ((rspiocb->iocb.ulpStatus == 0)
1874 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 2172 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
1875 lpfc_unreg_rpi(phba, ndlp); 2173 lpfc_unreg_rpi(vport, ndlp);
1876 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1877 mbox->context2 = lpfc_nlp_get(ndlp); 2174 mbox->context2 = lpfc_nlp_get(ndlp);
1878 ndlp->nlp_prev_state = ndlp->nlp_state; 2175 mbox->vport = vport;
1879 lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE); 2176 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
2177 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
2178 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
2179 }
2180 else {
2181 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
2182 ndlp->nlp_prev_state = ndlp->nlp_state;
2183 lpfc_nlp_set_state(vport, ndlp,
2184 NLP_STE_REG_LOGIN_ISSUE);
2185 }
1880 if (lpfc_sli_issue_mbox(phba, mbox, 2186 if (lpfc_sli_issue_mbox(phba, mbox,
1881 (MBX_NOWAIT | MBX_STOP_IOCB)) 2187 (MBX_NOWAIT | MBX_STOP_IOCB))
1882 != MBX_NOT_FINISHED) { 2188 != MBX_NOT_FINISHED) {
@@ -1886,15 +2192,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1886 /* NOTE: we should have messages for unsuccessful 2192 /* NOTE: we should have messages for unsuccessful
1887 reglogin */ 2193 reglogin */
1888 } else { 2194 } else {
1889 /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ 2195 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
1890 if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2196 if (!lpfc_error_lost_link(irsp) &&
1891 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || 2197 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1892 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || 2198 lpfc_drop_node(vport, ndlp);
1893 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) { 2199 ndlp = NULL;
1894 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1895 lpfc_drop_node(phba, ndlp);
1896 ndlp = NULL;
1897 }
1898 } 2200 }
1899 } 2201 }
1900 mp = (struct lpfc_dmabuf *) mbox->context1; 2202 mp = (struct lpfc_dmabuf *) mbox->context1;
@@ -1906,19 +2208,21 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1906 } 2208 }
1907out: 2209out:
1908 if (ndlp) { 2210 if (ndlp) {
1909 spin_lock_irq(phba->host->host_lock); 2211 spin_lock_irq(shost->host_lock);
1910 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 2212 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
1911 spin_unlock_irq(phba->host->host_lock); 2213 spin_unlock_irq(shost->host_lock);
1912 } 2214 }
1913 lpfc_els_free_iocb(phba, cmdiocb); 2215 lpfc_els_free_iocb(phba, cmdiocb);
1914 return; 2216 return;
1915} 2217}
1916 2218
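In the mailbox branch of lpfc_cmpl_els_rsp() the completion handler is now picked by flag: nodes marked NLP_RM_DFLT_RPI get lpfc_mbx_cmpl_dflt_rpi with LPFC_MBX_IMED_UNREG, everything else keeps lpfc_mbx_cmpl_reg_login and moves to REG_LOGIN_ISSUE. A compact sketch of selecting behaviour by installing a different callback (types and flag values are illustrative):

	#include <stdio.h>

	struct mbox_cmd;
	typedef void (*mbox_cmpl_fn)(struct mbox_cmd *);

	struct mbox_cmd {
		mbox_cmpl_fn cmpl;
		unsigned int flags;
	};

	#define RM_DFLT_RPI 0x1	/* stand-in for NLP_RM_DFLT_RPI */

	static void cmpl_dflt_rpi(struct mbox_cmd *m)  { (void)m; printf("drop default RPI\n"); }
	static void cmpl_reg_login(struct mbox_cmd *m) { (void)m; printf("finish REG_LOGIN\n"); }

	/* Pick the completion handler once, when queuing the mailbox,
	 * instead of branching again when the command finishes. */
	static void queue_mbox(struct mbox_cmd *m, unsigned int node_flags)
	{
		m->cmpl = (node_flags & RM_DFLT_RPI) ? cmpl_dflt_rpi : cmpl_reg_login;
		m->cmpl(m);	/* completion would normally run asynchronously */
	}

	int main(void)
	{
		struct mbox_cmd m = { NULL, 0 };

		queue_mbox(&m, RM_DFLT_RPI);
		queue_mbox(&m, 0);
		return 0;
	}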
1917int 2219int
1918lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, 2220lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
1919 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp, 2221 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
1920 LPFC_MBOXQ_t * mbox, uint8_t newnode) 2222 LPFC_MBOXQ_t *mbox, uint8_t newnode)
1921{ 2223{
2224 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2225 struct lpfc_hba *phba = vport->phba;
1922 IOCB_t *icmd; 2226 IOCB_t *icmd;
1923 IOCB_t *oldcmd; 2227 IOCB_t *oldcmd;
1924 struct lpfc_iocbq *elsiocb; 2228 struct lpfc_iocbq *elsiocb;
@@ -1935,23 +2239,30 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1935 2239
1936 switch (flag) { 2240 switch (flag) {
1937 case ELS_CMD_ACC: 2241 case ELS_CMD_ACC:
1938 cmdsize = sizeof (uint32_t); 2242 cmdsize = sizeof(uint32_t);
1939 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, 2243 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
1940 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 2244 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
1941 if (!elsiocb) { 2245 if (!elsiocb) {
2246 spin_lock_irq(shost->host_lock);
1942 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2247 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2248 spin_unlock_irq(shost->host_lock);
1943 return 1; 2249 return 1;
1944 } 2250 }
2251
1945 icmd = &elsiocb->iocb; 2252 icmd = &elsiocb->iocb;
1946 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 2253 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1947 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2254 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1948 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2255 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1949 pcmd += sizeof (uint32_t); 2256 pcmd += sizeof(uint32_t);
2257
2258 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2259 "Issue ACC: did:x%x flg:x%x",
2260 ndlp->nlp_DID, ndlp->nlp_flag, 0);
1950 break; 2261 break;
1951 case ELS_CMD_PLOGI: 2262 case ELS_CMD_PLOGI:
1952 cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t)); 2263 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
1953 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, 2264 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
1954 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 2265 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
1955 if (!elsiocb) 2266 if (!elsiocb)
1956 return 1; 2267 return 1;
1957 2268
@@ -1963,12 +2274,16 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1963 elsiocb->context_un.mbox = mbox; 2274 elsiocb->context_un.mbox = mbox;
1964 2275
1965 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2276 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1966 pcmd += sizeof (uint32_t); 2277 pcmd += sizeof(uint32_t);
1967 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); 2278 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2279
2280 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2281 "Issue ACC PLOGI: did:x%x flg:x%x",
2282 ndlp->nlp_DID, ndlp->nlp_flag, 0);
1968 break; 2283 break;
1969 case ELS_CMD_PRLO: 2284 case ELS_CMD_PRLO:
1970 cmdsize = sizeof (uint32_t) + sizeof (PRLO); 2285 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
1971 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, 2286 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
1972 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 2287 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
1973 if (!elsiocb) 2288 if (!elsiocb)
1974 return 1; 2289 return 1;
@@ -1978,10 +2293,14 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1978 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2293 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1979 2294
1980 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 2295 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
1981 sizeof (uint32_t) + sizeof (PRLO)); 2296 sizeof(uint32_t) + sizeof(PRLO));
1982 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 2297 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
1983 els_pkt_ptr = (ELS_PKT *) pcmd; 2298 els_pkt_ptr = (ELS_PKT *) pcmd;
1984 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 2299 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
2300
2301 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2302 "Issue ACC PRLO: did:x%x flg:x%x",
2303 ndlp->nlp_DID, ndlp->nlp_flag, 0);
1985 break; 2304 break;
1986 default: 2305 default:
1987 return 1; 2306 return 1;
@@ -1994,25 +2313,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1994 2313
1995 /* Xmit ELS ACC response tag <ulpIoTag> */ 2314 /* Xmit ELS ACC response tag <ulpIoTag> */
1996 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2315 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1997 "%d:0128 Xmit ELS ACC response tag x%x, XRI: x%x, " 2316 "%d (%d):0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
1998 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n", 2317 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
1999 phba->brd_no, elsiocb->iotag, 2318 phba->brd_no, vport->vpi, elsiocb->iotag,
2000 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2319 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2001 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2320 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2002 2321
2003 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 2322 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2004 spin_lock_irq(phba->host->host_lock); 2323 spin_lock_irq(shost->host_lock);
2005 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2324 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2006 spin_unlock_irq(phba->host->host_lock); 2325 spin_unlock_irq(shost->host_lock);
2007 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 2326 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
2008 } else { 2327 } else {
2009 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2328 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2010 } 2329 }
2011 2330
2012 phba->fc_stat.elsXmitACC++; 2331 phba->fc_stat.elsXmitACC++;
2013 spin_lock_irq(phba->host->host_lock);
2014 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2332 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2015 spin_unlock_irq(phba->host->host_lock);
2016 if (rc == IOCB_ERROR) { 2333 if (rc == IOCB_ERROR) {
2017 lpfc_els_free_iocb(phba, elsiocb); 2334 lpfc_els_free_iocb(phba, elsiocb);
2018 return 1; 2335 return 1;
@@ -2021,9 +2338,11 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
2021} 2338}
2022 2339
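For the PLOGI case in lpfc_els_rsp_acc() the ACC payload is just the ELS_CMD_ACC word followed by a copy of the service parameters, now taken per vport from vport->fc_sparam. A self-contained sketch of that payload layout; the command constant and structure size below are placeholders:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define CMD_ACC 0x02000000u		/* placeholder for ELS_CMD_ACC */

	struct serv_parm_sketch {		/* stand-in for struct serv_parm */
		uint8_t bytes[112];
	};

	/* Build an ACC(PLOGI) payload: one command word, then the login
	 * service parameters of the responding port. Returns bytes used. */
	static size_t build_plogi_acc(uint8_t *buf, const struct serv_parm_sketch *sp)
	{
		uint32_t cmd = CMD_ACC;
		size_t off = 0;

		memcpy(buf + off, &cmd, sizeof(cmd));
		off += sizeof(cmd);
		memcpy(buf + off, sp, sizeof(*sp));
		off += sizeof(*sp);
		return off;
	}

	int main(void)
	{
		struct serv_parm_sketch sp = { { 0 } };
		uint8_t payload[256];

		printf("ACC(PLOGI) payload is %zu bytes\n",
		       build_plogi_acc(payload, &sp));
		return 0;
	}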
2023int 2340int
2024lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError, 2341lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
2025 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp) 2342 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
2343 LPFC_MBOXQ_t *mbox)
2026{ 2344{
2345 struct lpfc_hba *phba = vport->phba;
2027 IOCB_t *icmd; 2346 IOCB_t *icmd;
2028 IOCB_t *oldcmd; 2347 IOCB_t *oldcmd;
2029 struct lpfc_iocbq *elsiocb; 2348 struct lpfc_iocbq *elsiocb;
@@ -2036,9 +2355,9 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
2036 psli = &phba->sli; 2355 psli = &phba->sli;
2037 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2356 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2038 2357
2039 cmdsize = 2 * sizeof (uint32_t); 2358 cmdsize = 2 * sizeof(uint32_t);
2040 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, 2359 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2041 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 2360 ndlp->nlp_DID, ELS_CMD_LS_RJT);
2042 if (!elsiocb) 2361 if (!elsiocb)
2043 return 1; 2362 return 1;
2044 2363
@@ -2048,22 +2367,30 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
2048 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2367 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2049 2368
2050 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 2369 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
2051 pcmd += sizeof (uint32_t); 2370 pcmd += sizeof(uint32_t);
2052 *((uint32_t *) (pcmd)) = rejectError; 2371 *((uint32_t *) (pcmd)) = rejectError;
2053 2372
2373 if (mbox) {
2374 elsiocb->context_un.mbox = mbox;
2375 elsiocb->context1 = lpfc_nlp_get(ndlp);
2376 }
2377
2054 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 2378 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
2055 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2379 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2056 "%d:0129 Xmit ELS RJT x%x response tag x%x xri x%x, " 2380 "%d (%d):0129 Xmit ELS RJT x%x response tag x%x "
2057 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 2381 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
2058 phba->brd_no, rejectError, elsiocb->iotag, 2382 "rpi x%x\n",
2383 phba->brd_no, vport->vpi, rejectError, elsiocb->iotag,
2059 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2384 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2060 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2385 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2061 2386
2387 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2388 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
2389 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
2390
2062 phba->fc_stat.elsXmitLSRJT++; 2391 phba->fc_stat.elsXmitLSRJT++;
2063 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2392 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2064 spin_lock_irq(phba->host->host_lock);
2065 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2393 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2066 spin_unlock_irq(phba->host->host_lock);
2067 if (rc == IOCB_ERROR) { 2394 if (rc == IOCB_ERROR) {
2068 lpfc_els_free_iocb(phba, elsiocb); 2395 lpfc_els_free_iocb(phba, elsiocb);
2069 return 1; 2396 return 1;
@@ -2072,25 +2399,22 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
2072} 2399}
2073 2400
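lpfc_els_rsp_reject() builds a two-word payload, the LS_RJT command followed by the reason/explanation word, and can now carry a mailbox in context_un.mbox so cleanup runs when the reject completes. A sketch of the reason-word packing only, with illustrative constants and no byte-order handling:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative reason/explanation values, not the driver's constants. */
	#define RJT_UNABLE_TO_PERFORM	0x09
	#define RJT_EXP_NOTHING_MORE	0x00

	/* Pack the second LS_RJT word: reason code in bits 23:16 and the
	 * reason-code explanation in bits 15:8 of a host-order word. */
	static uint32_t pack_ls_rjt_word(uint8_t reason, uint8_t explanation)
	{
		return ((uint32_t)reason << 16) | ((uint32_t)explanation << 8);
	}

	int main(void)
	{
		printf("LS_RJT word: 0x%08x\n",
		       pack_ls_rjt_word(RJT_UNABLE_TO_PERFORM, RJT_EXP_NOTHING_MORE));
		return 0;
	}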
2074int 2401int
2075lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba, 2402lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2076 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp) 2403 struct lpfc_nodelist *ndlp)
2077{ 2404{
2405 struct lpfc_hba *phba = vport->phba;
2406 struct lpfc_sli *psli = &phba->sli;
2407 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2078 ADISC *ap; 2408 ADISC *ap;
2079 IOCB_t *icmd; 2409 IOCB_t *icmd, *oldcmd;
2080 IOCB_t *oldcmd;
2081 struct lpfc_iocbq *elsiocb; 2410 struct lpfc_iocbq *elsiocb;
2082 struct lpfc_sli_ring *pring;
2083 struct lpfc_sli *psli;
2084 uint8_t *pcmd; 2411 uint8_t *pcmd;
2085 uint16_t cmdsize; 2412 uint16_t cmdsize;
2086 int rc; 2413 int rc;
2087 2414
2088 psli = &phba->sli; 2415 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2089 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2416 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2090 2417 ndlp->nlp_DID, ELS_CMD_ACC);
2091 cmdsize = sizeof (uint32_t) + sizeof (ADISC);
2092 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2093 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2094 if (!elsiocb) 2418 if (!elsiocb)
2095 return 1; 2419 return 1;
2096 2420
@@ -2100,28 +2424,30 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
2100 2424
2101 /* Xmit ADISC ACC response tag <ulpIoTag> */ 2425 /* Xmit ADISC ACC response tag <ulpIoTag> */
2102 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2426 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2103 "%d:0130 Xmit ADISC ACC response iotag x%x xri: " 2427 "%d (%d):0130 Xmit ADISC ACC response iotag x%x xri: "
2104 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 2428 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
2105 phba->brd_no, elsiocb->iotag, 2429 phba->brd_no, vport->vpi, elsiocb->iotag,
2106 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2430 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2107 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2431 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2108 2432
2109 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2433 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2110 2434
2111 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2435 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2112 pcmd += sizeof (uint32_t); 2436 pcmd += sizeof(uint32_t);
2113 2437
2114 ap = (ADISC *) (pcmd); 2438 ap = (ADISC *) (pcmd);
2115 ap->hardAL_PA = phba->fc_pref_ALPA; 2439 ap->hardAL_PA = phba->fc_pref_ALPA;
2116 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name)); 2440 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2117 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); 2441 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2118 ap->DID = be32_to_cpu(phba->fc_myDID); 2442 ap->DID = be32_to_cpu(vport->fc_myDID);
2443
2444 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2445 "Issue ACC ADISC: did:x%x flg:x%x",
2446 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2119 2447
2120 phba->fc_stat.elsXmitACC++; 2448 phba->fc_stat.elsXmitACC++;
2121 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2449 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2122 spin_lock_irq(phba->host->host_lock);
2123 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2450 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2124 spin_unlock_irq(phba->host->host_lock);
2125 if (rc == IOCB_ERROR) { 2451 if (rc == IOCB_ERROR) {
2126 lpfc_els_free_iocb(phba, elsiocb); 2452 lpfc_els_free_iocb(phba, elsiocb);
2127 return 1; 2453 return 1;
@@ -2130,9 +2456,10 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
2130} 2456}
2131 2457
2132int 2458int
2133lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb, 2459lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2134 struct lpfc_nodelist *ndlp) 2460 struct lpfc_nodelist *ndlp)
2135{ 2461{
2462 struct lpfc_hba *phba = vport->phba;
2136 PRLI *npr; 2463 PRLI *npr;
2137 lpfc_vpd_t *vpd; 2464 lpfc_vpd_t *vpd;
2138 IOCB_t *icmd; 2465 IOCB_t *icmd;
@@ -2147,8 +2474,8 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
2147 psli = &phba->sli; 2474 psli = &phba->sli;
2148 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2475 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2149 2476
2150 cmdsize = sizeof (uint32_t) + sizeof (PRLI); 2477 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2151 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, ndlp, 2478 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2152 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); 2479 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
2153 if (!elsiocb) 2480 if (!elsiocb)
2154 return 1; 2481 return 1;
@@ -2159,19 +2486,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
2159 2486
2160 /* Xmit PRLI ACC response tag <ulpIoTag> */ 2487 /* Xmit PRLI ACC response tag <ulpIoTag> */
2161 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2488 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2162 "%d:0131 Xmit PRLI ACC response tag x%x xri x%x, " 2489 "%d (%d):0131 Xmit PRLI ACC response tag x%x xri x%x, "
2163 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 2490 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
2164 phba->brd_no, elsiocb->iotag, 2491 phba->brd_no, vport->vpi, elsiocb->iotag,
2165 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2492 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2166 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2493 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2167 2494
2168 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2495 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2169 2496
2170 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 2497 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
2171 pcmd += sizeof (uint32_t); 2498 pcmd += sizeof(uint32_t);
2172 2499
2173 /* For PRLI, remainder of payload is PRLI parameter page */ 2500 /* For PRLI, remainder of payload is PRLI parameter page */
2174 memset(pcmd, 0, sizeof (PRLI)); 2501 memset(pcmd, 0, sizeof(PRLI));
2175 2502
2176 npr = (PRLI *) pcmd; 2503 npr = (PRLI *) pcmd;
2177 vpd = &phba->vpd; 2504 vpd = &phba->vpd;
@@ -2193,12 +2520,14 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
2193 npr->prliType = PRLI_FCP_TYPE; 2520 npr->prliType = PRLI_FCP_TYPE;
2194 npr->initiatorFunc = 1; 2521 npr->initiatorFunc = 1;
2195 2522
2523 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2524 "Issue ACC PRLI: did:x%x flg:x%x",
2525 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2526
2196 phba->fc_stat.elsXmitACC++; 2527 phba->fc_stat.elsXmitACC++;
2197 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2528 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2198 2529
2199 spin_lock_irq(phba->host->host_lock);
2200 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2530 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2201 spin_unlock_irq(phba->host->host_lock);
2202 if (rc == IOCB_ERROR) { 2531 if (rc == IOCB_ERROR) {
2203 lpfc_els_free_iocb(phba, elsiocb); 2532 lpfc_els_free_iocb(phba, elsiocb);
2204 return 1; 2533 return 1;
@@ -2207,12 +2536,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
2207} 2536}
2208 2537
2209static int 2538static int
2210lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format, 2539lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
2211 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 2540 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
2212{ 2541{
2542 struct lpfc_hba *phba = vport->phba;
2213 RNID *rn; 2543 RNID *rn;
2214 IOCB_t *icmd; 2544 IOCB_t *icmd, *oldcmd;
2215 IOCB_t *oldcmd;
2216 struct lpfc_iocbq *elsiocb; 2545 struct lpfc_iocbq *elsiocb;
2217 struct lpfc_sli_ring *pring; 2546 struct lpfc_sli_ring *pring;
2218 struct lpfc_sli *psli; 2547 struct lpfc_sli *psli;
@@ -2223,13 +2552,13 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
2223 psli = &phba->sli; 2552 psli = &phba->sli;
2224 pring = &psli->ring[LPFC_ELS_RING]; 2553 pring = &psli->ring[LPFC_ELS_RING];
2225 2554
2226 cmdsize = sizeof (uint32_t) + sizeof (uint32_t) 2555 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
2227 + (2 * sizeof (struct lpfc_name)); 2556 + (2 * sizeof(struct lpfc_name));
2228 if (format) 2557 if (format)
2229 cmdsize += sizeof (RNID_TOP_DISC); 2558 cmdsize += sizeof(RNID_TOP_DISC);
2230 2559
2231 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, 2560 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2232 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 2561 ndlp->nlp_DID, ELS_CMD_ACC);
2233 if (!elsiocb) 2562 if (!elsiocb)
2234 return 1; 2563 return 1;
2235 2564
@@ -2239,30 +2568,30 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
2239 2568
2240 /* Xmit RNID ACC response tag <ulpIoTag> */ 2569 /* Xmit RNID ACC response tag <ulpIoTag> */
2241 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2570 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2242 "%d:0132 Xmit RNID ACC response tag x%x " 2571 "%d (%d):0132 Xmit RNID ACC response tag x%x "
2243 "xri x%x\n", 2572 "xri x%x\n",
2244 phba->brd_no, elsiocb->iotag, 2573 phba->brd_no, vport->vpi, elsiocb->iotag,
2245 elsiocb->iocb.ulpContext); 2574 elsiocb->iocb.ulpContext);
2246 2575
2247 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2576 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2248 2577
2249 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2578 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2250 pcmd += sizeof (uint32_t); 2579 pcmd += sizeof(uint32_t);
2251 2580
2252 memset(pcmd, 0, sizeof (RNID)); 2581 memset(pcmd, 0, sizeof(RNID));
2253 rn = (RNID *) (pcmd); 2582 rn = (RNID *) (pcmd);
2254 rn->Format = format; 2583 rn->Format = format;
2255 rn->CommonLen = (2 * sizeof (struct lpfc_name)); 2584 rn->CommonLen = (2 * sizeof(struct lpfc_name));
2256 memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name)); 2585 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2257 memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); 2586 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2258 switch (format) { 2587 switch (format) {
2259 case 0: 2588 case 0:
2260 rn->SpecificLen = 0; 2589 rn->SpecificLen = 0;
2261 break; 2590 break;
2262 case RNID_TOPOLOGY_DISC: 2591 case RNID_TOPOLOGY_DISC:
2263 rn->SpecificLen = sizeof (RNID_TOP_DISC); 2592 rn->SpecificLen = sizeof(RNID_TOP_DISC);
2264 memcpy(&rn->un.topologyDisc.portName, 2593 memcpy(&rn->un.topologyDisc.portName,
2265 &phba->fc_portname, sizeof (struct lpfc_name)); 2594 &vport->fc_portname, sizeof(struct lpfc_name));
2266 rn->un.topologyDisc.unitType = RNID_HBA; 2595 rn->un.topologyDisc.unitType = RNID_HBA;
2267 rn->un.topologyDisc.physPort = 0; 2596 rn->un.topologyDisc.physPort = 0;
2268 rn->un.topologyDisc.attachedNodes = 0; 2597 rn->un.topologyDisc.attachedNodes = 0;
@@ -2273,15 +2602,17 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
2273 break; 2602 break;
2274 } 2603 }
2275 2604
2605 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2606 "Issue ACC RNID: did:x%x flg:x%x",
2607 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2608
2276 phba->fc_stat.elsXmitACC++; 2609 phba->fc_stat.elsXmitACC++;
2277 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2610 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2278 lpfc_nlp_put(ndlp); 2611 lpfc_nlp_put(ndlp);
2279 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 2612 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
2280 * it could be freed */ 2613 * it could be freed */
2281 2614
2282 spin_lock_irq(phba->host->host_lock);
2283 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2615 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2284 spin_unlock_irq(phba->host->host_lock);
2285 if (rc == IOCB_ERROR) { 2616 if (rc == IOCB_ERROR) {
2286 lpfc_els_free_iocb(phba, elsiocb); 2617 lpfc_els_free_iocb(phba, elsiocb);
2287 return 1; 2618 return 1;
@@ -2290,168 +2621,153 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
2290} 2621}
2291 2622
2292int 2623int
2293lpfc_els_disc_adisc(struct lpfc_hba *phba) 2624lpfc_els_disc_adisc(struct lpfc_vport *vport)
2294{ 2625{
2295 int sentadisc; 2626 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2296 struct lpfc_nodelist *ndlp, *next_ndlp; 2627 struct lpfc_nodelist *ndlp, *next_ndlp;
2628 int sentadisc = 0;
2297 2629
2298 sentadisc = 0;
2299 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2630 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2300 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) { 2631 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2301 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2632 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2302 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2633 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2303 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { 2634 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2304 spin_lock_irq(phba->host->host_lock); 2635 spin_lock_irq(shost->host_lock);
2305 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2636 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2306 spin_unlock_irq(phba->host->host_lock); 2637 spin_unlock_irq(shost->host_lock);
2307 ndlp->nlp_prev_state = ndlp->nlp_state; 2638 ndlp->nlp_prev_state = ndlp->nlp_state;
2308 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); 2639 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2309 lpfc_issue_els_adisc(phba, ndlp, 0); 2640 lpfc_issue_els_adisc(vport, ndlp, 0);
2310 sentadisc++; 2641 sentadisc++;
2311 phba->num_disc_nodes++; 2642 vport->num_disc_nodes++;
2312 if (phba->num_disc_nodes >= 2643 if (vport->num_disc_nodes >=
2313 phba->cfg_discovery_threads) { 2644 vport->phba->cfg_discovery_threads) {
2314 spin_lock_irq(phba->host->host_lock); 2645 spin_lock_irq(shost->host_lock);
2315 phba->fc_flag |= FC_NLP_MORE; 2646 vport->fc_flag |= FC_NLP_MORE;
2316 spin_unlock_irq(phba->host->host_lock); 2647 spin_unlock_irq(shost->host_lock);
2317 break; 2648 break;
2318 } 2649 }
2319 } 2650 }
2320 } 2651 }
2321 if (sentadisc == 0) { 2652 if (sentadisc == 0) {
2322 spin_lock_irq(phba->host->host_lock); 2653 spin_lock_irq(shost->host_lock);
2323 phba->fc_flag &= ~FC_NLP_MORE; 2654 vport->fc_flag &= ~FC_NLP_MORE;
2324 spin_unlock_irq(phba->host->host_lock); 2655 spin_unlock_irq(shost->host_lock);
2325 } 2656 }
2326 return sentadisc; 2657 return sentadisc;
2327} 2658}
2328 2659
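lpfc_els_disc_adisc() now walks the vport's node list but keeps the same throttle: once cfg_discovery_threads ADISCs are outstanding it sets FC_NLP_MORE and stops, so the walk can resume from a later completion. The throttling pattern, reduced to plain C over an array of hypothetical nodes and an assumed threshold:

	#include <stdbool.h>
	#include <stdio.h>

	#define DISCOVERY_THREADS 2	/* assumed cfg_discovery_threads value */

	struct disc_node {
		int did;
		bool needs_adisc;	/* models NLP_NPR_2B_DISC | NLP_NPR_ADISC */
	};

	/* Issue ADISCs until the outstanding count hits the threshold;
	 * returning true models setting FC_NLP_MORE so the caller knows
	 * to resume the walk later. */
	static bool issue_adisc_batch(struct disc_node *nodes, int count,
				      int *outstanding)
	{
		for (int i = 0; i < count; i++) {
			if (!nodes[i].needs_adisc)
				continue;
			nodes[i].needs_adisc = false;
			printf("ADISC to DID 0x%06x\n", nodes[i].did);
			if (++(*outstanding) >= DISCOVERY_THREADS)
				return true;	/* stop; more may remain */
		}
		return false;
	}

	int main(void)
	{
		struct disc_node nodes[] = {
			{ 0x010200, true }, { 0x010300, true }, { 0x010400, true },
		};
		int outstanding = 0;
		bool more = issue_adisc_batch(nodes, 3, &outstanding);

		printf("outstanding=%d more=%s\n", outstanding, more ? "yes" : "no");
		return 0;
	}

Bounding the batch keeps the number of ELS commands in flight per vport small even when many NPR nodes need re-authentication at once.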
2329int 2660int
2330lpfc_els_disc_plogi(struct lpfc_hba * phba) 2661lpfc_els_disc_plogi(struct lpfc_vport *vport)
2331{ 2662{
2332 int sentplogi; 2663 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2333 struct lpfc_nodelist *ndlp, *next_ndlp; 2664 struct lpfc_nodelist *ndlp, *next_ndlp;
2665 int sentplogi = 0;
2334 2666
2335 sentplogi = 0; 2667 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
2336 /* go thru NPR list and issue any remaining ELS PLOGIs */ 2668 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2337 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
2338 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2669 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2339 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2670 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2340 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 2671 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
2341 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 2672 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
2342 ndlp->nlp_prev_state = ndlp->nlp_state; 2673 ndlp->nlp_prev_state = ndlp->nlp_state;
2343 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 2674 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2344 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 2675 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2345 sentplogi++; 2676 sentplogi++;
2346 phba->num_disc_nodes++; 2677 vport->num_disc_nodes++;
2347 if (phba->num_disc_nodes >= 2678 if (vport->num_disc_nodes >=
2348 phba->cfg_discovery_threads) { 2679 vport->phba->cfg_discovery_threads) {
2349 spin_lock_irq(phba->host->host_lock); 2680 spin_lock_irq(shost->host_lock);
2350 phba->fc_flag |= FC_NLP_MORE; 2681 vport->fc_flag |= FC_NLP_MORE;
2351 spin_unlock_irq(phba->host->host_lock); 2682 spin_unlock_irq(shost->host_lock);
2352 break; 2683 break;
2353 } 2684 }
2354 } 2685 }
2355 } 2686 }
2356 if (sentplogi == 0) { 2687 if (sentplogi == 0) {
2357 spin_lock_irq(phba->host->host_lock); 2688 spin_lock_irq(shost->host_lock);
2358 phba->fc_flag &= ~FC_NLP_MORE; 2689 vport->fc_flag &= ~FC_NLP_MORE;
2359 spin_unlock_irq(phba->host->host_lock); 2690 spin_unlock_irq(shost->host_lock);
2360 } 2691 }
2361 return sentplogi; 2692 return sentplogi;
2362} 2693}
2363 2694
2364int 2695void
2365lpfc_els_flush_rscn(struct lpfc_hba * phba) 2696lpfc_els_flush_rscn(struct lpfc_vport *vport)
2366{ 2697{
2367 struct lpfc_dmabuf *mp; 2698 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2699 struct lpfc_hba *phba = vport->phba;
2368 int i; 2700 int i;
2369 2701
2370 for (i = 0; i < phba->fc_rscn_id_cnt; i++) { 2702 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2371 mp = phba->fc_rscn_id_list[i]; 2703 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2372 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2704 vport->fc_rscn_id_list[i] = NULL;
2373 kfree(mp); 2705 }
2374 phba->fc_rscn_id_list[i] = NULL; 2706 spin_lock_irq(shost->host_lock);
2375 } 2707 vport->fc_rscn_id_cnt = 0;
2376 phba->fc_rscn_id_cnt = 0; 2708 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
2377 spin_lock_irq(phba->host->host_lock); 2709 spin_unlock_irq(shost->host_lock);
2378 phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 2710 lpfc_can_disctmo(vport);
2379 spin_unlock_irq(phba->host->host_lock);
2380 lpfc_can_disctmo(phba);
2381 return 0;
2382} 2711}
2383 2712
2384int 2713int
2385lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did) 2714lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2386{ 2715{
2387 D_ID ns_did; 2716 D_ID ns_did;
2388 D_ID rscn_did; 2717 D_ID rscn_did;
2389 struct lpfc_dmabuf *mp;
2390 uint32_t *lp; 2718 uint32_t *lp;
2391 uint32_t payload_len, cmd, i, match; 2719 uint32_t payload_len, i;
2720 struct lpfc_hba *phba = vport->phba;
2392 2721
2393 ns_did.un.word = did; 2722 ns_did.un.word = did;
2394 match = 0;
2395 2723
2396 /* Never match fabric nodes for RSCNs */ 2724 /* Never match fabric nodes for RSCNs */
2397 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 2725 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2398 return(0); 2726 return 0;
2399 2727
2400 /* If we are doing a FULL RSCN rediscovery, match everything */ 2728 /* If we are doing a FULL RSCN rediscovery, match everything */
2401 if (phba->fc_flag & FC_RSCN_DISCOVERY) { 2729 if (vport->fc_flag & FC_RSCN_DISCOVERY)
2402 return did; 2730 return did;
2403 }
2404 2731
2405 for (i = 0; i < phba->fc_rscn_id_cnt; i++) { 2732 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2406 mp = phba->fc_rscn_id_list[i]; 2733 lp = vport->fc_rscn_id_list[i]->virt;
2407 lp = (uint32_t *) mp->virt; 2734 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
2408 cmd = *lp++; 2735 payload_len -= sizeof(uint32_t); /* take off word 0 */
2409 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
2410 payload_len -= sizeof (uint32_t); /* take off word 0 */
2411 while (payload_len) { 2736 while (payload_len) {
2412 rscn_did.un.word = *lp++; 2737 rscn_did.un.word = be32_to_cpu(*lp++);
2413 rscn_did.un.word = be32_to_cpu(rscn_did.un.word); 2738 payload_len -= sizeof(uint32_t);
2414 payload_len -= sizeof (uint32_t);
2415 switch (rscn_did.un.b.resv) { 2739 switch (rscn_did.un.b.resv) {
2416 case 0: /* Single N_Port ID affected */ 2740 case 0: /* Single N_Port ID affected */
2417 if (ns_did.un.word == rscn_did.un.word) { 2741 if (ns_did.un.word == rscn_did.un.word)
2418 match = did; 2742 return did;
2419 }
2420 break; 2743 break;
2421 case 1: /* Whole N_Port Area affected */ 2744 case 1: /* Whole N_Port Area affected */
2422 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 2745 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
2423 && (ns_did.un.b.area == rscn_did.un.b.area)) 2746 && (ns_did.un.b.area == rscn_did.un.b.area))
2424 { 2747 return did;
2425 match = did;
2426 }
2427 break; 2748 break;
2428 case 2: /* Whole N_Port Domain affected */ 2749 case 2: /* Whole N_Port Domain affected */
2429 if (ns_did.un.b.domain == rscn_did.un.b.domain) 2750 if (ns_did.un.b.domain == rscn_did.un.b.domain)
2430 { 2751 return did;
2431 match = did;
2432 }
2433 break;
2434 case 3: /* Whole Fabric affected */
2435 match = did;
2436 break; 2752 break;
2437 default: 2753 default:
2438 /* Unknown Identifier in RSCN list */ 2754 /* Unknown Identifier in RSCN node */
2439 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2755 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2440 "%d:0217 Unknown Identifier in " 2756 "%d (%d):0217 Unknown "
2441 "RSCN payload Data: x%x\n", 2757 "Identifier in RSCN payload "
2442 phba->brd_no, rscn_did.un.word); 2758 "Data: x%x\n",
2443 break; 2759 phba->brd_no, vport->vpi,
2444 } 2760 rscn_did.un.word);
2445 if (match) { 2761 case 3: /* Whole Fabric affected */
2446 break; 2762 return did;
2447 } 2763 }
2448 } 2764 }
2449 } 2765 }
2450 return match; 2766 return 0;
2451} 2767}
2452 2768
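lpfc_rscn_payload_check() now returns the DID straight out of the format switch instead of carrying a match flag. Each RSCN entry's top byte selects how wide the comparison is: a single port, an area, a domain, or the whole fabric (unknown formats fall through to the fabric case above). A standalone sketch of that comparison, assuming the usual domain/area/port byte layout:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Decompose a 24-bit FC address: 0xDDAAPP (domain, area, port). */
	static uint8_t domain_of(uint32_t did) { return (did >> 16) & 0xff; }
	static uint8_t area_of(uint32_t did)   { return (did >> 8) & 0xff; }

	/* Does one RSCN entry cover the given DID? The entry's top byte is
	 * its address format: 0 = port, 1 = area, 2 = domain, 3 = fabric. */
	static bool rscn_entry_matches(uint32_t entry, uint32_t did)
	{
		uint32_t fmt = (entry >> 24) & 0xff;
		uint32_t entry_did = entry & 0x00ffffff;

		switch (fmt) {
		case 0:
			return entry_did == did;
		case 1:
			return domain_of(entry_did) == domain_of(did) &&
			       area_of(entry_did) == area_of(did);
		case 2:
			return domain_of(entry_did) == domain_of(did);
		default:	/* 3 = whole fabric; unknown formats match too,
				 * mirroring the fall-through above */
			return true;
		}
	}

	int main(void)
	{
		uint32_t entry = 0x01010200;	/* area-format entry for 0x0102xx */

		printf("0x010203 matches: %d\n", rscn_entry_matches(entry, 0x010203));
		printf("0x010303 matches: %d\n", rscn_entry_matches(entry, 0x010303));
		return 0;
	}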
2453static int 2769static int
2454lpfc_rscn_recovery_check(struct lpfc_hba *phba) 2770lpfc_rscn_recovery_check(struct lpfc_vport *vport)
2455{ 2771{
2456 struct lpfc_nodelist *ndlp = NULL; 2772 struct lpfc_nodelist *ndlp = NULL;
2457 2773
@@ -2459,188 +2775,261 @@ lpfc_rscn_recovery_check(struct lpfc_hba *phba)
2459 * them to NPR state. 2775 * them to NPR state.
2460 */ 2776 */
2461 2777
2462 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 2778 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2463 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE || 2779 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
2464 lpfc_rscn_payload_check(phba, ndlp->nlp_DID) == 0) 2780 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
2465 continue; 2781 continue;
2466 2782
2467 lpfc_disc_state_machine(phba, ndlp, NULL, 2783 lpfc_disc_state_machine(vport, ndlp, NULL,
2468 NLP_EVT_DEVICE_RECOVERY); 2784 NLP_EVT_DEVICE_RECOVERY);
2469 2785
2470 /* 2786 /*
2471 * Make sure NLP_DELAY_TMO is NOT running after a device 2787 * Make sure NLP_DELAY_TMO is NOT running after a device
2472 * recovery event. 2788 * recovery event.
2473 */ 2789 */
2474 if (ndlp->nlp_flag & NLP_DELAY_TMO) 2790 if (ndlp->nlp_flag & NLP_DELAY_TMO)
2475 lpfc_cancel_retry_delay_tmo(phba, ndlp); 2791 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2476 } 2792 }
2477 2793
2478 return 0; 2794 return 0;
2479} 2795}
2480 2796
2481static int 2797static int
2482lpfc_els_rcv_rscn(struct lpfc_hba * phba, 2798lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2483 struct lpfc_iocbq * cmdiocb, 2799 struct lpfc_nodelist *ndlp, uint8_t newnode)
2484 struct lpfc_nodelist * ndlp, uint8_t newnode)
2485{ 2800{
2801 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2802 struct lpfc_hba *phba = vport->phba;
2486 struct lpfc_dmabuf *pcmd; 2803 struct lpfc_dmabuf *pcmd;
2487 uint32_t *lp; 2804 struct lpfc_vport *next_vport;
2805 uint32_t *lp, *datap;
2488 IOCB_t *icmd; 2806 IOCB_t *icmd;
2489 uint32_t payload_len, cmd; 2807 uint32_t payload_len, length, nportid, *cmd;
2808 int rscn_cnt = vport->fc_rscn_id_cnt;
2809 int rscn_id = 0, hba_id = 0;
2490 int i; 2810 int i;
2491 2811
2492 icmd = &cmdiocb->iocb; 2812 icmd = &cmdiocb->iocb;
2493 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 2813 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2494 lp = (uint32_t *) pcmd->virt; 2814 lp = (uint32_t *) pcmd->virt;
2495 2815
2496 cmd = *lp++; 2816 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
2497 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */ 2817 payload_len -= sizeof(uint32_t); /* take off word 0 */
2498 payload_len -= sizeof (uint32_t); /* take off word 0 */
2499 cmd &= ELS_CMD_MASK;
2500 2818
2501 /* RSCN received */ 2819 /* RSCN received */
2502 lpfc_printf_log(phba, 2820 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2503 KERN_INFO, 2821 "%d (%d):0214 RSCN received Data: x%x x%x x%x x%x\n",
2504 LOG_DISCOVERY, 2822 phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
2505 "%d:0214 RSCN received Data: x%x x%x x%x x%x\n", 2823 *lp, rscn_cnt);
2506 phba->brd_no,
2507 phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
2508 2824
2509 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 2825 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2510 fc_host_post_event(phba->host, fc_get_event_number(), 2826 fc_host_post_event(shost, fc_get_event_number(),
2511 FCH_EVT_RSCN, lp[i]); 2827 FCH_EVT_RSCN, lp[i]);
2512 2828
2513 /* If we are about to begin discovery, just ACC the RSCN. 2829 /* If we are about to begin discovery, just ACC the RSCN.
2514 * Discovery processing will satisfy it. 2830 * Discovery processing will satisfy it.
2515 */ 2831 */
2516 if (phba->hba_state <= LPFC_NS_QRY) { 2832 if (vport->port_state <= LPFC_NS_QRY) {
2517 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 2833 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
2518 newnode); 2834 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
2835 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
2836
2837 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
2838 newnode);
2519 return 0; 2839 return 0;
2520 } 2840 }
2521 2841
2842 /* If this RSCN just contains NPortIDs for other vports on this HBA,
2843 * just ACC and ignore it.
2844 */
2845 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2846 !(phba->cfg_peer_port_login)) {
2847 i = payload_len;
2848 datap = lp;
2849 while (i > 0) {
2850 nportid = *datap++;
2851 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
2852 i -= sizeof(uint32_t);
2853 rscn_id++;
2854 list_for_each_entry(next_vport, &phba->port_list,
2855 listentry) {
2856 if (nportid == next_vport->fc_myDID) {
2857 hba_id++;
2858 break;
2859 }
2860 }
2861 }
2862 if (rscn_id == hba_id) {
2863 /* ALL NPortIDs in RSCN are on HBA */
2864 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2865 "%d (%d):0214 Ignore RSCN Data: x%x x%x x%x x%x\n",
2866 phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
2867 *lp, rscn_cnt);
2868
2869 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
2870 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
2871 ndlp->nlp_DID, vport->port_state,
2872 ndlp->nlp_flag);
2873
2874 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
2875 ndlp, NULL, newnode);
2876 return 0;
2877 }
2878 }
2879
2522 /* If we are already processing an RSCN, save the received 2880 /* If we are already processing an RSCN, save the received
2523 * RSCN payload buffer, cmdiocb->context2 to process later. 2881 * RSCN payload buffer, cmdiocb->context2 to process later.
2524 */ 2882 */
2525 if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 2883 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
2526 if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) && 2884 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
2527 !(phba->fc_flag & FC_RSCN_DISCOVERY)) { 2885 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
2528 spin_lock_irq(phba->host->host_lock); 2886 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
2529 phba->fc_flag |= FC_RSCN_MODE; 2887
2530 spin_unlock_irq(phba->host->host_lock); 2888 vport->fc_flag |= FC_RSCN_DEFERRED;
2531 phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd; 2889 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
2532 2890 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
2533 /* If we zero, cmdiocb->context2, the calling 2891 spin_lock_irq(shost->host_lock);
2534 * routine will not try to free it. 2892 vport->fc_flag |= FC_RSCN_MODE;
2535 */ 2893 spin_unlock_irq(shost->host_lock);
2536 cmdiocb->context2 = NULL; 2894 if (rscn_cnt) {
2895 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
2896 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
2897 }
2898 if ((rscn_cnt) &&
2899 (payload_len + length <= LPFC_BPL_SIZE)) {
2900 *cmd &= ELS_CMD_MASK;
2901 *cmd |= be32_to_cpu(payload_len + length);
2902 memcpy(((uint8_t *)cmd) + length, lp,
2903 payload_len);
2904 } else {
2905 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
2906 vport->fc_rscn_id_cnt++;
2907 /* If we zero, cmdiocb->context2, the calling
2908 * routine will not try to free it.
2909 */
2910 cmdiocb->context2 = NULL;
2911 }
2537 2912
2538 /* Deferred RSCN */ 2913 /* Deferred RSCN */
2539 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2914 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2540 "%d:0235 Deferred RSCN " 2915 "%d (%d):0235 Deferred RSCN "
2541 "Data: x%x x%x x%x\n", 2916 "Data: x%x x%x x%x\n",
2542 phba->brd_no, phba->fc_rscn_id_cnt, 2917 phba->brd_no, vport->vpi,
2543 phba->fc_flag, phba->hba_state); 2918 vport->fc_rscn_id_cnt, vport->fc_flag,
2919 vport->port_state);
2544 } else { 2920 } else {
2545 spin_lock_irq(phba->host->host_lock); 2921 spin_lock_irq(shost->host_lock);
2546 phba->fc_flag |= FC_RSCN_DISCOVERY; 2922 vport->fc_flag |= FC_RSCN_DISCOVERY;
2547 spin_unlock_irq(phba->host->host_lock); 2923 spin_unlock_irq(shost->host_lock);
2548 /* ReDiscovery RSCN */ 2924 /* ReDiscovery RSCN */
2549 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2925 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2550 "%d:0234 ReDiscovery RSCN " 2926 "%d (%d):0234 ReDiscovery RSCN "
2551 "Data: x%x x%x x%x\n", 2927 "Data: x%x x%x x%x\n",
2552 phba->brd_no, phba->fc_rscn_id_cnt, 2928 phba->brd_no, vport->vpi,
2553 phba->fc_flag, phba->hba_state); 2929 vport->fc_rscn_id_cnt, vport->fc_flag,
2930 vport->port_state);
2554 } 2931 }
2555 /* Send back ACC */ 2932 /* Send back ACC */
2556 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 2933 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
2557 newnode); 2934 newnode);
2558 2935
2559 /* send RECOVERY event for ALL nodes that match RSCN payload */ 2936 /* send RECOVERY event for ALL nodes that match RSCN payload */
2560 lpfc_rscn_recovery_check(phba); 2937 lpfc_rscn_recovery_check(vport);
2938 vport->fc_flag &= ~FC_RSCN_DEFERRED;
2561 return 0; 2939 return 0;
2562 } 2940 }
2563 2941
2564 phba->fc_flag |= FC_RSCN_MODE; 2942 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
2565 phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd; 2943 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
2944 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
2945
2946 spin_lock_irq(shost->host_lock);
2947 vport->fc_flag |= FC_RSCN_MODE;
2948 spin_unlock_irq(shost->host_lock);
2949 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
2566 /* 2950 /*
2567 * If we zero, cmdiocb->context2, the calling routine will 2951 * If we zero, cmdiocb->context2, the calling routine will
2568 * not try to free it. 2952 * not try to free it.
2569 */ 2953 */
2570 cmdiocb->context2 = NULL; 2954 cmdiocb->context2 = NULL;
2571 2955
2572 lpfc_set_disctmo(phba); 2956 lpfc_set_disctmo(vport);
2573 2957
2574 /* Send back ACC */ 2958 /* Send back ACC */
2575 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode); 2959 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
2576 2960
2577 /* send RECOVERY event for ALL nodes that match RSCN payload */ 2961 /* send RECOVERY event for ALL nodes that match RSCN payload */
2578 lpfc_rscn_recovery_check(phba); 2962 lpfc_rscn_recovery_check(vport);
2579 2963
2580 return lpfc_els_handle_rscn(phba); 2964 return lpfc_els_handle_rscn(vport);
2581} 2965}
2582 2966
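A notable addition in lpfc_els_rcv_rscn() is payload coalescing: while an RSCN is already being processed, a newly received payload is appended to the last saved buffer whenever the combined data still fits in LPFC_BPL_SIZE, with word 0 tracking the running length, instead of always consuming another fc_rscn_id_list slot. A simplified user-space model of that append, with an assumed buffer size and no byte swapping:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define BUF_SIZE 1024u	/* assumed stand-in for LPFC_BPL_SIZE */

	/* Saved payload: word 0 holds the total length in bytes (including
	 * word 0 itself), followed by the accumulated RSCN entries. */
	static bool rscn_append(uint32_t *saved, const uint32_t *entries,
				uint32_t entry_bytes)
	{
		uint32_t used = saved[0];

		if (used + entry_bytes > BUF_SIZE)
			return false;	/* caller must start a new buffer */

		memcpy((uint8_t *)saved + used, entries, entry_bytes);
		saved[0] = used + entry_bytes;
		return true;
	}

	int main(void)
	{
		uint32_t saved[BUF_SIZE / 4] = { 8, 0x00010200 };	/* one entry held */
		uint32_t more[] = { 0x00010300, 0x02020000 };		/* new RSCN entries */

		if (rscn_append(saved, more, sizeof(more)))
			printf("coalesced, %u bytes now held\n", saved[0]);
		return 0;
	}

Coalescing keeps fc_rscn_id_list from filling up (and forcing FC_RSCN_DISCOVERY) during a burst of small RSCNs.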
2583int 2967int
2584lpfc_els_handle_rscn(struct lpfc_hba * phba) 2968lpfc_els_handle_rscn(struct lpfc_vport *vport)
2585{ 2969{
2586 struct lpfc_nodelist *ndlp; 2970 struct lpfc_nodelist *ndlp;
2971 struct lpfc_hba *phba = vport->phba;
2972
2973 /* Ignore RSCN if the port is being torn down. */
2974 if (vport->load_flag & FC_UNLOADING) {
2975 lpfc_els_flush_rscn(vport);
2976 return 0;
2977 }
2587 2978
2588 /* Start timer for RSCN processing */ 2979 /* Start timer for RSCN processing */
2589 lpfc_set_disctmo(phba); 2980 lpfc_set_disctmo(vport);
2590 2981
2591 /* RSCN processed */ 2982 /* RSCN processed */
2592 lpfc_printf_log(phba, 2983 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2593 KERN_INFO, 2984 "%d (%d):0215 RSCN processed Data: x%x x%x x%x x%x\n",
2594 LOG_DISCOVERY, 2985 phba->brd_no, vport->vpi,
2595 "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n", 2986 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
2596 phba->brd_no, 2987 vport->port_state);
2597 phba->fc_flag, 0, phba->fc_rscn_id_cnt,
2598 phba->hba_state);
2599 2988
2600 /* To process RSCN, first compare RSCN data with NameServer */ 2989 /* To process RSCN, first compare RSCN data with NameServer */
2601 phba->fc_ns_retry = 0; 2990 vport->fc_ns_retry = 0;
2602 ndlp = lpfc_findnode_did(phba, NameServer_DID); 2991 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2603 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 2992 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
2604 /* Good ndlp, issue CT Request to NameServer */ 2993 /* Good ndlp, issue CT Request to NameServer */
2605 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) { 2994 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
2606 /* Wait for NameServer query cmpl before we can 2995 /* Wait for NameServer query cmpl before we can
2607 continue */ 2996 continue */
2608 return 1; 2997 return 1;
2609 }
2610 } else { 2998 } else {
2611 /* If login to NameServer does not exist, issue one */ 2999 /* If login to NameServer does not exist, issue one */
2612 /* Good status, issue PLOGI to NameServer */ 3000 /* Good status, issue PLOGI to NameServer */
2613 ndlp = lpfc_findnode_did(phba, NameServer_DID); 3001 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2614 if (ndlp) { 3002 if (ndlp)
2615 /* Wait for NameServer login cmpl before we can 3003 /* Wait for NameServer login cmpl before we can
2616 continue */ 3004 continue */
2617 return 1; 3005 return 1;
2618 } 3006
2619 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3007 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2620 if (!ndlp) { 3008 if (!ndlp) {
2621 lpfc_els_flush_rscn(phba); 3009 lpfc_els_flush_rscn(vport);
2622 return 0; 3010 return 0;
2623 } else { 3011 } else {
2624 lpfc_nlp_init(phba, ndlp, NameServer_DID); 3012 lpfc_nlp_init(vport, ndlp, NameServer_DID);
2625 ndlp->nlp_type |= NLP_FABRIC; 3013 ndlp->nlp_type |= NLP_FABRIC;
2626 ndlp->nlp_prev_state = ndlp->nlp_state; 3014 ndlp->nlp_prev_state = ndlp->nlp_state;
2627 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 3015 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2628 lpfc_issue_els_plogi(phba, NameServer_DID, 0); 3016 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
2629 /* Wait for NameServer login cmpl before we can 3017 /* Wait for NameServer login cmpl before we can
2630 continue */ 3018 continue */
2631 return 1; 3019 return 1;
2632 } 3020 }
2633 } 3021 }
2634 3022
2635 lpfc_els_flush_rscn(phba); 3023 lpfc_els_flush_rscn(vport);
2636 return 0; 3024 return 0;
2637} 3025}
2638 3026
2639static int 3027static int
2640lpfc_els_rcv_flogi(struct lpfc_hba * phba, 3028lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2641 struct lpfc_iocbq * cmdiocb, 3029 struct lpfc_nodelist *ndlp, uint8_t newnode)
2642 struct lpfc_nodelist * ndlp, uint8_t newnode)
2643{ 3030{
3031 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3032 struct lpfc_hba *phba = vport->phba;
2644 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3033 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2645 uint32_t *lp = (uint32_t *) pcmd->virt; 3034 uint32_t *lp = (uint32_t *) pcmd->virt;
2646 IOCB_t *icmd = &cmdiocb->iocb; 3035 IOCB_t *icmd = &cmdiocb->iocb;
@@ -2655,7 +3044,7 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
2655 3044
2656 /* FLOGI received */ 3045 /* FLOGI received */
2657 3046
2658 lpfc_set_disctmo(phba); 3047 lpfc_set_disctmo(vport);
2659 3048
2660 if (phba->fc_topology == TOPOLOGY_LOOP) { 3049 if (phba->fc_topology == TOPOLOGY_LOOP) {
2661 /* We should never receive a FLOGI in loop mode, ignore it */ 3050 /* We should never receive a FLOGI in loop mode, ignore it */
@@ -2664,33 +3053,34 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
2664 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 3053 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
2665 Loop Mode */ 3054 Loop Mode */
2666 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 3055 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
2667 "%d:0113 An FLOGI ELS command x%x was received " 3056 "%d (%d):0113 An FLOGI ELS command x%x was "
2668 "from DID x%x in Loop Mode\n", 3057 "received from DID x%x in Loop Mode\n",
2669 phba->brd_no, cmd, did); 3058 phba->brd_no, vport->vpi, cmd, did);
2670 return 1; 3059 return 1;
2671 } 3060 }
2672 3061
2673 did = Fabric_DID; 3062 did = Fabric_DID;
2674 3063
2675 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) { 3064 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
2676 /* For a FLOGI we accept, then if our portname is greater 3065 /* For a FLOGI we accept, then if our portname is greater
2677 		 * than the remote portname we initiate Nport login. 3066 		 * than the remote portname we initiate Nport login.
2678 */ 3067 */
2679 3068
2680 rc = memcmp(&phba->fc_portname, &sp->portName, 3069 rc = memcmp(&vport->fc_portname, &sp->portName,
2681 sizeof (struct lpfc_name)); 3070 sizeof(struct lpfc_name));
2682 3071
2683 if (!rc) { 3072 if (!rc) {
2684 if ((mbox = mempool_alloc(phba->mbox_mem_pool, 3073 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2685 GFP_KERNEL)) == 0) { 3074 if (!mbox)
2686 return 1; 3075 return 1;
2687 } 3076
2688 lpfc_linkdown(phba); 3077 lpfc_linkdown(phba);
2689 lpfc_init_link(phba, mbox, 3078 lpfc_init_link(phba, mbox,
2690 phba->cfg_topology, 3079 phba->cfg_topology,
2691 phba->cfg_link_speed); 3080 phba->cfg_link_speed);
2692 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 3081 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2693 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3082 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3083 mbox->vport = vport;
2694 rc = lpfc_sli_issue_mbox 3084 rc = lpfc_sli_issue_mbox
2695 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); 3085 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
2696 lpfc_set_loopback_flag(phba); 3086 lpfc_set_loopback_flag(phba);
@@ -2699,31 +3089,34 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
2699 } 3089 }
2700 return 1; 3090 return 1;
2701 } else if (rc > 0) { /* greater than */ 3091 } else if (rc > 0) { /* greater than */
2702 spin_lock_irq(phba->host->host_lock); 3092 spin_lock_irq(shost->host_lock);
2703 phba->fc_flag |= FC_PT2PT_PLOGI; 3093 vport->fc_flag |= FC_PT2PT_PLOGI;
2704 spin_unlock_irq(phba->host->host_lock); 3094 spin_unlock_irq(shost->host_lock);
2705 } 3095 }
2706 phba->fc_flag |= FC_PT2PT; 3096 spin_lock_irq(shost->host_lock);
2707 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 3097 vport->fc_flag |= FC_PT2PT;
3098 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3099 spin_unlock_irq(shost->host_lock);
2708 } else { 3100 } else {
2709 /* Reject this request because invalid parameters */ 3101 /* Reject this request because invalid parameters */
2710 stat.un.b.lsRjtRsvd0 = 0; 3102 stat.un.b.lsRjtRsvd0 = 0;
2711 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3103 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2712 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 3104 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
2713 stat.un.b.vendorUnique = 0; 3105 stat.un.b.vendorUnique = 0;
2714 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 3106 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3107 NULL);
2715 return 1; 3108 return 1;
2716 } 3109 }
2717 3110
2718 /* Send back ACC */ 3111 /* Send back ACC */
2719 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode); 3112 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
2720 3113
2721 return 0; 3114 return 0;
2722} 3115}
2723 3116
2724static int 3117static int
2725lpfc_els_rcv_rnid(struct lpfc_hba * phba, 3118lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2726 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp) 3119 struct lpfc_nodelist *ndlp)
2727{ 3120{
2728 struct lpfc_dmabuf *pcmd; 3121 struct lpfc_dmabuf *pcmd;
2729 uint32_t *lp; 3122 uint32_t *lp;
@@ -2746,7 +3139,7 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba,
2746 case 0: 3139 case 0:
2747 case RNID_TOPOLOGY_DISC: 3140 case RNID_TOPOLOGY_DISC:
2748 /* Send back ACC */ 3141 /* Send back ACC */
2749 lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp); 3142 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
2750 break; 3143 break;
2751 default: 3144 default:
2752 /* Reject this request because format not supported */ 3145 /* Reject this request because format not supported */
@@ -2754,14 +3147,15 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba,
2754 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3147 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2755 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 3148 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2756 stat.un.b.vendorUnique = 0; 3149 stat.un.b.vendorUnique = 0;
2757 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 3150 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3151 NULL);
2758 } 3152 }
2759 return 0; 3153 return 0;
2760} 3154}
2761 3155
2762static int 3156static int
2763lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3157lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2764 struct lpfc_nodelist *ndlp) 3158 struct lpfc_nodelist *ndlp)
2765{ 3159{
2766 struct ls_rjt stat; 3160 struct ls_rjt stat;
2767 3161
@@ -2770,15 +3164,15 @@ lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2770 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3164 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2771 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 3165 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2772 stat.un.b.vendorUnique = 0; 3166 stat.un.b.vendorUnique = 0;
2773 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 3167 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2774 return 0; 3168 return 0;
2775} 3169}
2776 3170
2777static void 3171static void
2778lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3172lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2779{ 3173{
2780 struct lpfc_sli *psli; 3174 struct lpfc_sli *psli = &phba->sli;
2781 struct lpfc_sli_ring *pring; 3175 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2782 MAILBOX_t *mb; 3176 MAILBOX_t *mb;
2783 IOCB_t *icmd; 3177 IOCB_t *icmd;
2784 RPS_RSP *rps_rsp; 3178 RPS_RSP *rps_rsp;
@@ -2788,8 +3182,6 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2788 uint16_t xri, status; 3182 uint16_t xri, status;
2789 uint32_t cmdsize; 3183 uint32_t cmdsize;
2790 3184
2791 psli = &phba->sli;
2792 pring = &psli->ring[LPFC_ELS_RING];
2793 mb = &pmb->mb; 3185 mb = &pmb->mb;
2794 3186
2795 ndlp = (struct lpfc_nodelist *) pmb->context2; 3187 ndlp = (struct lpfc_nodelist *) pmb->context2;
@@ -2804,8 +3196,9 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2804 3196
2805 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t); 3197 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
2806 mempool_free(pmb, phba->mbox_mem_pool); 3198 mempool_free(pmb, phba->mbox_mem_pool);
2807 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, lpfc_max_els_tries, ndlp, 3199 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
2808 ndlp->nlp_DID, ELS_CMD_ACC); 3200 lpfc_max_els_tries, ndlp,
3201 ndlp->nlp_DID, ELS_CMD_ACC);
2809 lpfc_nlp_put(ndlp); 3202 lpfc_nlp_put(ndlp);
2810 if (!elsiocb) 3203 if (!elsiocb)
2811 return; 3204 return;
@@ -2815,14 +3208,14 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2815 3208
2816 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3209 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2817 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3210 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2818 pcmd += sizeof (uint32_t); /* Skip past command */ 3211 pcmd += sizeof(uint32_t); /* Skip past command */
2819 rps_rsp = (RPS_RSP *)pcmd; 3212 rps_rsp = (RPS_RSP *)pcmd;
2820 3213
2821 if (phba->fc_topology != TOPOLOGY_LOOP) 3214 if (phba->fc_topology != TOPOLOGY_LOOP)
2822 status = 0x10; 3215 status = 0x10;
2823 else 3216 else
2824 status = 0x8; 3217 status = 0x8;
2825 if (phba->fc_flag & FC_FABRIC) 3218 if (phba->pport->fc_flag & FC_FABRIC)
2826 status |= 0x4; 3219 status |= 0x4;
2827 3220
2828 rps_rsp->rsvd1 = 0; 3221 rps_rsp->rsvd1 = 0;
@@ -2836,25 +3229,25 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2836 3229
2837 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 3230 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
2838 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3231 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2839 "%d:0118 Xmit ELS RPS ACC response tag x%x xri x%x, " 3232 "%d (%d):0118 Xmit ELS RPS ACC response tag x%x "
2840 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 3233 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
2841 phba->brd_no, elsiocb->iotag, 3234 "rpi x%x\n",
3235 phba->brd_no, ndlp->vport->vpi, elsiocb->iotag,
2842 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 3236 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2843 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 3237 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2844 3238
2845 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 3239 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2846 phba->fc_stat.elsXmitACC++; 3240 phba->fc_stat.elsXmitACC++;
2847 3241 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
2848 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
2849 lpfc_els_free_iocb(phba, elsiocb); 3242 lpfc_els_free_iocb(phba, elsiocb);
2850 }
2851 return; 3243 return;
2852} 3244}
2853 3245
2854static int 3246static int
2855lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 3247lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2856 struct lpfc_nodelist * ndlp) 3248 struct lpfc_nodelist *ndlp)
2857{ 3249{
3250 struct lpfc_hba *phba = vport->phba;
2858 uint32_t *lp; 3251 uint32_t *lp;
2859 uint8_t flag; 3252 uint8_t flag;
2860 LPFC_MBOXQ_t *mbox; 3253 LPFC_MBOXQ_t *mbox;
@@ -2868,7 +3261,8 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2868 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3261 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2869 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 3262 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2870 stat.un.b.vendorUnique = 0; 3263 stat.un.b.vendorUnique = 0;
2871 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 3264 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3265 NULL);
2872 } 3266 }
2873 3267
2874 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3268 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2878,19 +3272,24 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2878 3272
2879 if ((flag == 0) || 3273 if ((flag == 0) ||
2880 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) || 3274 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
2881 ((flag == 2) && (memcmp(&rps->un.portName, &phba->fc_portname, 3275 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
2882 sizeof (struct lpfc_name)) == 0))) { 3276 sizeof(struct lpfc_name)) == 0))) {
2883 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) { 3277
3278 printk("Fix me....\n");
3279 dump_stack();
3280 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
3281 if (mbox) {
2884 lpfc_read_lnk_stat(phba, mbox); 3282 lpfc_read_lnk_stat(phba, mbox);
2885 mbox->context1 = 3283 mbox->context1 =
2886 (void *)((unsigned long)cmdiocb->iocb.ulpContext); 3284 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
2887 mbox->context2 = lpfc_nlp_get(ndlp); 3285 mbox->context2 = lpfc_nlp_get(ndlp);
3286 mbox->vport = vport;
2888 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 3287 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
2889 if (lpfc_sli_issue_mbox (phba, mbox, 3288 if (lpfc_sli_issue_mbox (phba, mbox,
2890 (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) { 3289 (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED)
2891 /* Mbox completion will send ELS Response */ 3290 /* Mbox completion will send ELS Response */
2892 return 0; 3291 return 0;
2893 } 3292
2894 lpfc_nlp_put(ndlp); 3293 lpfc_nlp_put(ndlp);
2895 mempool_free(mbox, phba->mbox_mem_pool); 3294 mempool_free(mbox, phba->mbox_mem_pool);
2896 } 3295 }
@@ -2899,27 +3298,25 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2899 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3298 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2900 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 3299 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2901 stat.un.b.vendorUnique = 0; 3300 stat.un.b.vendorUnique = 0;
2902 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 3301 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2903 return 0; 3302 return 0;
2904} 3303}
2905 3304
2906static int 3305static int
2907lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, 3306lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
2908 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp) 3307 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
2909{ 3308{
2910 IOCB_t *icmd; 3309 struct lpfc_hba *phba = vport->phba;
2911 IOCB_t *oldcmd; 3310 IOCB_t *icmd, *oldcmd;
2912 RPL_RSP rpl_rsp; 3311 RPL_RSP rpl_rsp;
2913 struct lpfc_iocbq *elsiocb; 3312 struct lpfc_iocbq *elsiocb;
2914 struct lpfc_sli_ring *pring; 3313 struct lpfc_sli *psli = &phba->sli;
2915 struct lpfc_sli *psli; 3314 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2916 uint8_t *pcmd; 3315 uint8_t *pcmd;
2917 3316
2918 psli = &phba->sli; 3317 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2919 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 3318 ndlp->nlp_DID, ELS_CMD_ACC);
2920 3319
2921 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2922 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2923 if (!elsiocb) 3320 if (!elsiocb)
2924 return 1; 3321 return 1;
2925 3322
@@ -2929,7 +3326,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
2929 3326
2930 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3327 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2931 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3328 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2932 pcmd += sizeof (uint16_t); 3329 pcmd += sizeof(uint16_t);
2933 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 3330 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
2934 pcmd += sizeof(uint16_t); 3331 pcmd += sizeof(uint16_t);
2935 3332
@@ -2937,8 +3334,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
2937 rpl_rsp.listLen = be32_to_cpu(1); 3334 rpl_rsp.listLen = be32_to_cpu(1);
2938 rpl_rsp.index = 0; 3335 rpl_rsp.index = 0;
2939 rpl_rsp.port_num_blk.portNum = 0; 3336 rpl_rsp.port_num_blk.portNum = 0;
2940 rpl_rsp.port_num_blk.portID = be32_to_cpu(phba->fc_myDID); 3337 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
2941 memcpy(&rpl_rsp.port_num_blk.portName, &phba->fc_portname, 3338 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
2942 sizeof(struct lpfc_name)); 3339 sizeof(struct lpfc_name));
2943 3340
2944 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 3341 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
@@ -2946,13 +3343,14 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
2946 3343
2947 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 3344 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
2948 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3345 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2949 "%d:0120 Xmit ELS RPL ACC response tag x%x xri x%x, " 3346 "%d (%d):0120 Xmit ELS RPL ACC response tag x%x "
2950 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 3347 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
2951 phba->brd_no, elsiocb->iotag, 3348 "rpi x%x\n",
3349 phba->brd_no, vport->vpi, elsiocb->iotag,
2952 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 3350 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2953 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 3351 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2954 3352
2955 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 3353 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2956 3354
2957 phba->fc_stat.elsXmitACC++; 3355 phba->fc_stat.elsXmitACC++;
2958 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 3356 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
@@ -2963,8 +3361,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
2963} 3361}
2964 3362
2965static int 3363static int
2966lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 3364lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2967 struct lpfc_nodelist * ndlp) 3365 struct lpfc_nodelist *ndlp)
2968{ 3366{
2969 struct lpfc_dmabuf *pcmd; 3367 struct lpfc_dmabuf *pcmd;
2970 uint32_t *lp; 3368 uint32_t *lp;
@@ -2979,7 +3377,8 @@ lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2979 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3377 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2980 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 3378 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2981 stat.un.b.vendorUnique = 0; 3379 stat.un.b.vendorUnique = 0;
2982 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 3380 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3381 NULL);
2983 } 3382 }
2984 3383
2985 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3384 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2996,15 +3395,16 @@ lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2996 } else { 3395 } else {
2997 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 3396 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
2998 } 3397 }
2999 lpfc_els_rsp_rpl_acc(phba, cmdsize, cmdiocb, ndlp); 3398 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
3000 3399
3001 return 0; 3400 return 0;
3002} 3401}
3003 3402
3004static int 3403static int
3005lpfc_els_rcv_farp(struct lpfc_hba * phba, 3404lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3006 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp) 3405 struct lpfc_nodelist *ndlp)
3007{ 3406{
3407 struct lpfc_hba *phba = vport->phba;
3008 struct lpfc_dmabuf *pcmd; 3408 struct lpfc_dmabuf *pcmd;
3009 uint32_t *lp; 3409 uint32_t *lp;
3010 IOCB_t *icmd; 3410 IOCB_t *icmd;
@@ -3020,11 +3420,9 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
3020 fp = (FARP *) lp; 3420 fp = (FARP *) lp;
3021 3421
3022 /* FARP-REQ received from DID <did> */ 3422 /* FARP-REQ received from DID <did> */
3023 lpfc_printf_log(phba, 3423 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3024 KERN_INFO, 3424 "%d (%d):0601 FARP-REQ received from DID x%x\n",
3025 LOG_ELS, 3425 phba->brd_no, vport->vpi, did);
3026 "%d:0601 FARP-REQ received from DID x%x\n",
3027 phba->brd_no, did);
3028 3426
3029 /* We will only support match on WWPN or WWNN */ 3427 /* We will only support match on WWPN or WWNN */
3030 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 3428 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
@@ -3034,15 +3432,15 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
3034 cnt = 0; 3432 cnt = 0;
3035 /* If this FARP command is searching for my portname */ 3433 /* If this FARP command is searching for my portname */
3036 if (fp->Mflags & FARP_MATCH_PORT) { 3434 if (fp->Mflags & FARP_MATCH_PORT) {
3037 if (memcmp(&fp->RportName, &phba->fc_portname, 3435 if (memcmp(&fp->RportName, &vport->fc_portname,
3038 sizeof (struct lpfc_name)) == 0) 3436 sizeof(struct lpfc_name)) == 0)
3039 cnt = 1; 3437 cnt = 1;
3040 } 3438 }
3041 3439
3042 /* If this FARP command is searching for my nodename */ 3440 /* If this FARP command is searching for my nodename */
3043 if (fp->Mflags & FARP_MATCH_NODE) { 3441 if (fp->Mflags & FARP_MATCH_NODE) {
3044 if (memcmp(&fp->RnodeName, &phba->fc_nodename, 3442 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
3045 sizeof (struct lpfc_name)) == 0) 3443 sizeof(struct lpfc_name)) == 0)
3046 cnt = 1; 3444 cnt = 1;
3047 } 3445 }
3048 3446
@@ -3052,28 +3450,28 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
3052 /* Log back into the node before sending the FARP. */ 3450 /* Log back into the node before sending the FARP. */
3053 if (fp->Rflags & FARP_REQUEST_PLOGI) { 3451 if (fp->Rflags & FARP_REQUEST_PLOGI) {
3054 ndlp->nlp_prev_state = ndlp->nlp_state; 3452 ndlp->nlp_prev_state = ndlp->nlp_state;
3055 lpfc_nlp_set_state(phba, ndlp, 3453 lpfc_nlp_set_state(vport, ndlp,
3056 NLP_STE_PLOGI_ISSUE); 3454 NLP_STE_PLOGI_ISSUE);
3057 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 3455 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
3058 } 3456 }
3059 3457
3060 /* Send a FARP response to that node */ 3458 /* Send a FARP response to that node */
3061 if (fp->Rflags & FARP_REQUEST_FARPR) { 3459 if (fp->Rflags & FARP_REQUEST_FARPR)
3062 lpfc_issue_els_farpr(phba, did, 0); 3460 lpfc_issue_els_farpr(vport, did, 0);
3063 }
3064 } 3461 }
3065 } 3462 }
3066 return 0; 3463 return 0;
3067} 3464}
3068 3465
3069static int 3466static int
3070lpfc_els_rcv_farpr(struct lpfc_hba * phba, 3467lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3071 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp) 3468 struct lpfc_nodelist *ndlp)
3072{ 3469{
3073 struct lpfc_dmabuf *pcmd; 3470 struct lpfc_dmabuf *pcmd;
3074 uint32_t *lp; 3471 uint32_t *lp;
3075 IOCB_t *icmd; 3472 IOCB_t *icmd;
3076 uint32_t cmd, did; 3473 uint32_t cmd, did;
3474 struct lpfc_hba *phba = vport->phba;
3077 3475
3078 icmd = &cmdiocb->iocb; 3476 icmd = &cmdiocb->iocb;
3079 did = icmd->un.elsreq64.remoteID; 3477 did = icmd->un.elsreq64.remoteID;
@@ -3082,21 +3480,18 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba,
3082 3480
3083 cmd = *lp++; 3481 cmd = *lp++;
3084 /* FARP-RSP received from DID <did> */ 3482 /* FARP-RSP received from DID <did> */
3085 lpfc_printf_log(phba, 3483 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3086 KERN_INFO, 3484 "%d (%d):0600 FARP-RSP received from DID x%x\n",
3087 LOG_ELS, 3485 phba->brd_no, vport->vpi, did);
3088 "%d:0600 FARP-RSP received from DID x%x\n",
3089 phba->brd_no, did);
3090
3091 /* ACCEPT the Farp resp request */ 3486 /* ACCEPT the Farp resp request */
3092 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 3487 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
3093 3488
3094 return 0; 3489 return 0;
3095} 3490}
3096 3491
3097static int 3492static int
3098lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 3493lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3099 struct lpfc_nodelist * fan_ndlp) 3494 struct lpfc_nodelist *fan_ndlp)
3100{ 3495{
3101 struct lpfc_dmabuf *pcmd; 3496 struct lpfc_dmabuf *pcmd;
3102 uint32_t *lp; 3497 uint32_t *lp;
@@ -3104,10 +3499,12 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3104 uint32_t cmd, did; 3499 uint32_t cmd, did;
3105 FAN *fp; 3500 FAN *fp;
3106 struct lpfc_nodelist *ndlp, *next_ndlp; 3501 struct lpfc_nodelist *ndlp, *next_ndlp;
3502 struct lpfc_hba *phba = vport->phba;
3107 3503
3108 /* FAN received */ 3504 /* FAN received */
3109 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n", 3505 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3110 phba->brd_no); 3506 "%d (%d):0265 FAN received\n",
3507 phba->brd_no, vport->vpi);
3111 3508
3112 icmd = &cmdiocb->iocb; 3509 icmd = &cmdiocb->iocb;
3113 did = icmd->un.elsreq64.remoteID; 3510 did = icmd->un.elsreq64.remoteID;
@@ -3115,11 +3512,11 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3115 lp = (uint32_t *)pcmd->virt; 3512 lp = (uint32_t *)pcmd->virt;
3116 3513
3117 cmd = *lp++; 3514 cmd = *lp++;
3118 fp = (FAN *)lp; 3515 fp = (FAN *) lp;
3119 3516
3120 /* FAN received; Fan does not have a reply sequence */ 3517 /* FAN received; Fan does not have a reply sequence */
3121 3518
3122 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) { 3519 if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
3123 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 3520 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
3124 sizeof(struct lpfc_name)) != 0) || 3521 sizeof(struct lpfc_name)) != 0) ||
3125 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 3522 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
@@ -3130,7 +3527,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3130 */ 3527 */
3131 3528
3132 list_for_each_entry_safe(ndlp, next_ndlp, 3529 list_for_each_entry_safe(ndlp, next_ndlp,
3133 &phba->fc_nodes, nlp_listp) { 3530 &vport->fc_nodes, nlp_listp) {
3134 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 3531 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3135 continue; 3532 continue;
3136 if (ndlp->nlp_type & NLP_FABRIC) { 3533 if (ndlp->nlp_type & NLP_FABRIC) {
@@ -3138,24 +3535,24 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3138 * Clean up old Fabric, Nameserver and 3535 * Clean up old Fabric, Nameserver and
3139 * other NLP_FABRIC logins 3536 * other NLP_FABRIC logins
3140 */ 3537 */
3141 lpfc_drop_node(phba, ndlp); 3538 lpfc_drop_node(vport, ndlp);
3142 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 3539 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
3143 /* Fail outstanding I/O now since this 3540 /* Fail outstanding I/O now since this
3144 * device is marked for PLOGI 3541 * device is marked for PLOGI
3145 */ 3542 */
3146 lpfc_unreg_rpi(phba, ndlp); 3543 lpfc_unreg_rpi(vport, ndlp);
3147 } 3544 }
3148 } 3545 }
3149 3546
3150 phba->hba_state = LPFC_FLOGI; 3547 vport->port_state = LPFC_FLOGI;
3151 lpfc_set_disctmo(phba); 3548 lpfc_set_disctmo(vport);
3152 lpfc_initial_flogi(phba); 3549 lpfc_initial_flogi(vport);
3153 return 0; 3550 return 0;
3154 } 3551 }
3155 /* Discovery not needed, 3552 /* Discovery not needed,
3156 * move the nodes to their original state. 3553 * move the nodes to their original state.
3157 */ 3554 */
3158 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, 3555 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
3159 nlp_listp) { 3556 nlp_listp) {
3160 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 3557 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3161 continue; 3558 continue;
@@ -3163,13 +3560,13 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3163 switch (ndlp->nlp_prev_state) { 3560 switch (ndlp->nlp_prev_state) {
3164 case NLP_STE_UNMAPPED_NODE: 3561 case NLP_STE_UNMAPPED_NODE:
3165 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 3562 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3166 lpfc_nlp_set_state(phba, ndlp, 3563 lpfc_nlp_set_state(vport, ndlp,
3167 NLP_STE_UNMAPPED_NODE); 3564 NLP_STE_UNMAPPED_NODE);
3168 break; 3565 break;
3169 3566
3170 case NLP_STE_MAPPED_NODE: 3567 case NLP_STE_MAPPED_NODE:
3171 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 3568 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3172 lpfc_nlp_set_state(phba, ndlp, 3569 lpfc_nlp_set_state(vport, ndlp,
3173 NLP_STE_MAPPED_NODE); 3570 NLP_STE_MAPPED_NODE);
3174 break; 3571 break;
3175 3572
@@ -3179,7 +3576,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3179 } 3576 }
3180 3577
3181 /* Start discovery - this should just do CLEAR_LA */ 3578 /* Start discovery - this should just do CLEAR_LA */
3182 lpfc_disc_start(phba); 3579 lpfc_disc_start(vport);
3183 } 3580 }
3184 return 0; 3581 return 0;
3185} 3582}
@@ -3187,42 +3584,42 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3187void 3584void
3188lpfc_els_timeout(unsigned long ptr) 3585lpfc_els_timeout(unsigned long ptr)
3189{ 3586{
3190 struct lpfc_hba *phba; 3587 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
3588 struct lpfc_hba *phba = vport->phba;
3191 unsigned long iflag; 3589 unsigned long iflag;
3192 3590
3193 phba = (struct lpfc_hba *)ptr; 3591 spin_lock_irqsave(&vport->work_port_lock, iflag);
3194 if (phba == 0) 3592 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
3195 return; 3593 vport->work_port_events |= WORKER_ELS_TMO;
3196 spin_lock_irqsave(phba->host->host_lock, iflag); 3594 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3197 if (!(phba->work_hba_events & WORKER_ELS_TMO)) { 3595
3198 phba->work_hba_events |= WORKER_ELS_TMO; 3596 spin_lock_irqsave(&phba->hbalock, iflag);
3199 if (phba->work_wait) 3597 if (phba->work_wait)
3200 wake_up(phba->work_wait); 3598 lpfc_worker_wake_up(phba);
3599 spin_unlock_irqrestore(&phba->hbalock, iflag);
3201 } 3600 }
3202 spin_unlock_irqrestore(phba->host->host_lock, iflag); 3601 else
3602 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3203 return; 3603 return;
3204} 3604}
3205 3605
3206void 3606void
3207lpfc_els_timeout_handler(struct lpfc_hba *phba) 3607lpfc_els_timeout_handler(struct lpfc_vport *vport)
3208{ 3608{
3609 struct lpfc_hba *phba = vport->phba;
3209 struct lpfc_sli_ring *pring; 3610 struct lpfc_sli_ring *pring;
3210 struct lpfc_iocbq *tmp_iocb, *piocb; 3611 struct lpfc_iocbq *tmp_iocb, *piocb;
3211 IOCB_t *cmd = NULL; 3612 IOCB_t *cmd = NULL;
3212 struct lpfc_dmabuf *pcmd; 3613 struct lpfc_dmabuf *pcmd;
3213 uint32_t *elscmd; 3614 uint32_t els_command = 0;
3214 uint32_t els_command=0;
3215 uint32_t timeout; 3615 uint32_t timeout;
3216 uint32_t remote_ID; 3616 uint32_t remote_ID = 0xffffffff;
3217 3617
3218 if (phba == 0)
3219 return;
3220 spin_lock_irq(phba->host->host_lock);
3221 /* If the timer is already canceled do nothing */ 3618 /* If the timer is already canceled do nothing */
3222 if (!(phba->work_hba_events & WORKER_ELS_TMO)) { 3619 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
3223 spin_unlock_irq(phba->host->host_lock);
3224 return; 3620 return;
3225 } 3621 }
3622 spin_lock_irq(&phba->hbalock);
3226 timeout = (uint32_t)(phba->fc_ratov << 1); 3623 timeout = (uint32_t)(phba->fc_ratov << 1);
3227 3624
3228 pring = &phba->sli.ring[LPFC_ELS_RING]; 3625 pring = &phba->sli.ring[LPFC_ELS_RING];
@@ -3230,63 +3627,70 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
3230 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 3627 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3231 cmd = &piocb->iocb; 3628 cmd = &piocb->iocb;
3232 3629
3233 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) || 3630 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
3234 (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN) || 3631 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
3235 (piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)) { 3632 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
3236 continue; 3633 continue;
3237 } 3634
3635 if (piocb->vport != vport)
3636 continue;
3637
3238 pcmd = (struct lpfc_dmabuf *) piocb->context2; 3638 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3239 if (pcmd) { 3639 if (pcmd)
3240 elscmd = (uint32_t *) (pcmd->virt); 3640 els_command = *(uint32_t *) (pcmd->virt);
3241 els_command = *elscmd;
3242 }
3243 3641
3244 if ((els_command == ELS_CMD_FARP) 3642 if (els_command == ELS_CMD_FARP ||
3245 || (els_command == ELS_CMD_FARPR)) { 3643 els_command == ELS_CMD_FARPR ||
3644 els_command == ELS_CMD_FDISC)
3645 continue;
3646
3647 if (vport != piocb->vport)
3246 continue; 3648 continue;
3247 }
3248 3649
3249 if (piocb->drvrTimeout > 0) { 3650 if (piocb->drvrTimeout > 0) {
3250 if (piocb->drvrTimeout >= timeout) { 3651 if (piocb->drvrTimeout >= timeout)
3251 piocb->drvrTimeout -= timeout; 3652 piocb->drvrTimeout -= timeout;
3252 } else { 3653 else
3253 piocb->drvrTimeout = 0; 3654 piocb->drvrTimeout = 0;
3254 }
3255 continue; 3655 continue;
3256 } 3656 }
3257 3657
3258 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) { 3658 remote_ID = 0xffffffff;
3259 struct lpfc_nodelist *ndlp; 3659 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
3260 ndlp = __lpfc_findnode_rpi(phba, cmd->ulpContext);
3261 remote_ID = ndlp->nlp_DID;
3262 } else {
3263 remote_ID = cmd->un.elsreq64.remoteID; 3660 remote_ID = cmd->un.elsreq64.remoteID;
3661 else {
3662 struct lpfc_nodelist *ndlp;
3663 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
3664 if (ndlp)
3665 remote_ID = ndlp->nlp_DID;
3264 } 3666 }
3265 3667
3266 lpfc_printf_log(phba, 3668 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3267 KERN_ERR, 3669 "%d (%d):0127 ELS timeout Data: x%x x%x x%x "
3268 LOG_ELS, 3670 "x%x\n",
3269 "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n", 3671 phba->brd_no, vport->vpi, els_command,
3270 phba->brd_no, els_command,
3271 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 3672 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
3272 3673
3273 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 3674 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
3274 } 3675 }
3275 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 3676 spin_unlock_irq(&phba->hbalock);
3276 mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
3277 3677
3278 spin_unlock_irq(phba->host->host_lock); 3678 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
3679 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
3279} 3680}
3280 3681
3281void 3682void
3282lpfc_els_flush_cmd(struct lpfc_hba *phba) 3683lpfc_els_flush_cmd(struct lpfc_vport *vport)
3283{ 3684{
3284 LIST_HEAD(completions); 3685 LIST_HEAD(completions);
3686 struct lpfc_hba *phba = vport->phba;
3285 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3687 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3286 struct lpfc_iocbq *tmp_iocb, *piocb; 3688 struct lpfc_iocbq *tmp_iocb, *piocb;
3287 IOCB_t *cmd = NULL; 3689 IOCB_t *cmd = NULL;
3288 3690
3289 spin_lock_irq(phba->host->host_lock); 3691 lpfc_fabric_abort_vport(vport);
3692
3693 spin_lock_irq(&phba->hbalock);
3290 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 3694 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
3291 cmd = &piocb->iocb; 3695 cmd = &piocb->iocb;
3292 3696
@@ -3301,271 +3705,1042 @@ lpfc_els_flush_cmd(struct lpfc_hba *phba)
3301 cmd->ulpCommand == CMD_ABORT_XRI_CN) 3705 cmd->ulpCommand == CMD_ABORT_XRI_CN)
3302 continue; 3706 continue;
3303 3707
3708 if (piocb->vport != vport)
3709 continue;
3710
3304 list_move_tail(&piocb->list, &completions); 3711 list_move_tail(&piocb->list, &completions);
3305 pring->txq_cnt--; 3712 pring->txq_cnt--;
3306
3307 } 3713 }
3308 3714
3309 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 3715 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3310 cmd = &piocb->iocb;
3311
3312 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 3716 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3313 continue; 3717 continue;
3314 } 3718 }
3315 3719
3720 if (piocb->vport != vport)
3721 continue;
3722
3316 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 3723 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
3317 } 3724 }
3318 spin_unlock_irq(phba->host->host_lock); 3725 spin_unlock_irq(&phba->hbalock);
3319 3726
3320 while(!list_empty(&completions)) { 3727 while (!list_empty(&completions)) {
3321 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 3728 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
3322 cmd = &piocb->iocb; 3729 cmd = &piocb->iocb;
3323 list_del(&piocb->list); 3730 list_del_init(&piocb->list);
3324 3731
3325 if (piocb->iocb_cmpl) { 3732 if (!piocb->iocb_cmpl)
3733 lpfc_sli_release_iocbq(phba, piocb);
3734 else {
3326 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 3735 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3327 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 3736 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3328 (piocb->iocb_cmpl) (phba, piocb, piocb); 3737 (piocb->iocb_cmpl) (phba, piocb, piocb);
3329 } else 3738 }
3330 lpfc_sli_release_iocbq(phba, piocb);
3331 } 3739 }
3332 3740
3333 return; 3741 return;
3334} 3742}
3335 3743
3336void 3744static void
3337lpfc_els_unsol_event(struct lpfc_hba * phba, 3745lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3338 struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb) 3746 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
3339{ 3747{
3340 struct lpfc_sli *psli;
3341 struct lpfc_nodelist *ndlp; 3748 struct lpfc_nodelist *ndlp;
3342 struct lpfc_dmabuf *mp;
3343 uint32_t *lp;
3344 IOCB_t *icmd;
3345 struct ls_rjt stat; 3749 struct ls_rjt stat;
3346 uint32_t cmd; 3750 uint32_t *payload;
3347 uint32_t did; 3751 uint32_t cmd, did, newnode, rjt_err = 0;
3348 uint32_t newnode; 3752 IOCB_t *icmd = &elsiocb->iocb;
3349 uint32_t drop_cmd = 0; /* by default do NOT drop received cmd */
3350 uint32_t rjt_err = 0;
3351
3352 psli = &phba->sli;
3353 icmd = &elsiocb->iocb;
3354
3355 if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3356 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
3357 /* Not enough posted buffers; Try posting more buffers */
3358 phba->fc_stat.NoRcvBuf++;
3359 lpfc_post_buffer(phba, pring, 0, 1);
3360 return;
3361 }
3362
3363 /* If there are no BDEs associated with this IOCB,
3364 * there is nothing to do.
3365 */
3366 if (icmd->ulpBdeCount == 0)
3367 return;
3368 3753
3369 /* type of ELS cmd is first 32bit word in packet */ 3754 if (vport == NULL || elsiocb->context2 == NULL)
3370 mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
3371 cont64[0].
3372 addrHigh,
3373 icmd->un.
3374 cont64[0].addrLow));
3375 if (mp == 0) {
3376 drop_cmd = 1;
3377 goto dropit; 3755 goto dropit;
3378 }
3379 3756
3380 newnode = 0; 3757 newnode = 0;
3381 lp = (uint32_t *) mp->virt; 3758 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
3382 cmd = *lp++; 3759 cmd = *payload;
3383 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1); 3760 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
3761 lpfc_post_buffer(phba, pring, 1, 1);
3384 3762
3763 did = icmd->un.rcvels.remoteID;
3385 if (icmd->ulpStatus) { 3764 if (icmd->ulpStatus) {
3386 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3765 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3387 kfree(mp); 3766 "RCV Unsol ELS: status:x%x/x%x did:x%x",
3388 drop_cmd = 1; 3767 icmd->ulpStatus, icmd->un.ulpWord[4], did);
3389 goto dropit; 3768 goto dropit;
3390 } 3769 }
3391 3770
3392 /* Check to see if link went down during discovery */ 3771 /* Check to see if link went down during discovery */
3393 if (lpfc_els_chk_latt(phba)) { 3772 if (lpfc_els_chk_latt(vport))
3394 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3395 kfree(mp);
3396 drop_cmd = 1;
3397 goto dropit; 3773 goto dropit;
3398 }
3399 3774
3400 	did = icmd->un.rcvels.remoteID; 3775 	/* Ignore traffic received during vport shutdown. */
3401 ndlp = lpfc_findnode_did(phba, did); 3776 if (vport->load_flag & FC_UNLOADING)
3777 goto dropit;
3778
3779 ndlp = lpfc_findnode_did(vport, did);
3402 if (!ndlp) { 3780 if (!ndlp) {
3403 /* Cannot find existing Fabric ndlp, so allocate a new one */ 3781 /* Cannot find existing Fabric ndlp, so allocate a new one */
3404 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3782 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3405 if (!ndlp) { 3783 if (!ndlp)
3406 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3407 kfree(mp);
3408 drop_cmd = 1;
3409 goto dropit; 3784 goto dropit;
3410 }
3411 3785
3412 lpfc_nlp_init(phba, ndlp, did); 3786 lpfc_nlp_init(vport, ndlp, did);
3413 newnode = 1; 3787 newnode = 1;
3414 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { 3788 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
3415 ndlp->nlp_type |= NLP_FABRIC; 3789 ndlp->nlp_type |= NLP_FABRIC;
3416 } 3790 }
3417 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 3791 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
3418 } 3792 }
3419 3793
3420 phba->fc_stat.elsRcvFrame++; 3794 phba->fc_stat.elsRcvFrame++;
3421 if (elsiocb->context1) 3795 if (elsiocb->context1)
3422 lpfc_nlp_put(elsiocb->context1); 3796 lpfc_nlp_put(elsiocb->context1);
3423 elsiocb->context1 = lpfc_nlp_get(ndlp); 3797 elsiocb->context1 = lpfc_nlp_get(ndlp);
3424 elsiocb->context2 = mp; 3798 elsiocb->vport = vport;
3425 3799
3426 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 3800 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
3427 cmd &= ELS_CMD_MASK; 3801 cmd &= ELS_CMD_MASK;
3428 } 3802 }
3429 /* ELS command <elsCmd> received from NPORT <did> */ 3803 /* ELS command <elsCmd> received from NPORT <did> */
3430 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3804 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3431 "%d:0112 ELS command x%x received from NPORT x%x " 3805 "%d (%d):0112 ELS command x%x received from NPORT x%x "
3432 "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state); 3806 "Data: x%x\n", phba->brd_no, vport->vpi, cmd, did,
3807 vport->port_state);
3433 3808
3434 switch (cmd) { 3809 switch (cmd) {
3435 case ELS_CMD_PLOGI: 3810 case ELS_CMD_PLOGI:
3811 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3812 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
3813 did, vport->port_state, ndlp->nlp_flag);
3814
3436 phba->fc_stat.elsRcvPLOGI++; 3815 phba->fc_stat.elsRcvPLOGI++;
3437 if (phba->hba_state < LPFC_DISC_AUTH) { 3816 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
3438 rjt_err = 1; 3817
3818 if (vport->port_state < LPFC_DISC_AUTH) {
3819 rjt_err = LSRJT_UNABLE_TPC;
3439 break; 3820 break;
3440 } 3821 }
3441 ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp); 3822 lpfc_disc_state_machine(vport, ndlp, elsiocb,
3442 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); 3823 NLP_EVT_RCV_PLOGI);
3824
3443 break; 3825 break;
3444 case ELS_CMD_FLOGI: 3826 case ELS_CMD_FLOGI:
3827 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3828 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
3829 did, vport->port_state, ndlp->nlp_flag);
3830
3445 phba->fc_stat.elsRcvFLOGI++; 3831 phba->fc_stat.elsRcvFLOGI++;
3446 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); 3832 lpfc_els_rcv_flogi(vport, elsiocb, ndlp, newnode);
3447 if (newnode) 3833 if (newnode)
3448 lpfc_drop_node(phba, ndlp); 3834 lpfc_drop_node(vport, ndlp);
3449 break; 3835 break;
3450 case ELS_CMD_LOGO: 3836 case ELS_CMD_LOGO:
3837 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3838 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
3839 did, vport->port_state, ndlp->nlp_flag);
3840
3451 phba->fc_stat.elsRcvLOGO++; 3841 phba->fc_stat.elsRcvLOGO++;
3452 if (phba->hba_state < LPFC_DISC_AUTH) { 3842 if (vport->port_state < LPFC_DISC_AUTH) {
3453 rjt_err = 1; 3843 rjt_err = LSRJT_UNABLE_TPC;
3454 break; 3844 break;
3455 } 3845 }
3456 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 3846 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
3457 break; 3847 break;
3458 case ELS_CMD_PRLO: 3848 case ELS_CMD_PRLO:
3849 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3850 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
3851 did, vport->port_state, ndlp->nlp_flag);
3852
3459 phba->fc_stat.elsRcvPRLO++; 3853 phba->fc_stat.elsRcvPRLO++;
3460 if (phba->hba_state < LPFC_DISC_AUTH) { 3854 if (vport->port_state < LPFC_DISC_AUTH) {
3461 rjt_err = 1; 3855 rjt_err = LSRJT_UNABLE_TPC;
3462 break; 3856 break;
3463 } 3857 }
3464 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 3858 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
3465 break; 3859 break;
3466 case ELS_CMD_RSCN: 3860 case ELS_CMD_RSCN:
3467 phba->fc_stat.elsRcvRSCN++; 3861 phba->fc_stat.elsRcvRSCN++;
3468 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); 3862 lpfc_els_rcv_rscn(vport, elsiocb, ndlp, newnode);
3469 if (newnode) 3863 if (newnode)
3470 lpfc_drop_node(phba, ndlp); 3864 lpfc_drop_node(vport, ndlp);
3471 break; 3865 break;
3472 case ELS_CMD_ADISC: 3866 case ELS_CMD_ADISC:
3867 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3868 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
3869 did, vport->port_state, ndlp->nlp_flag);
3870
3473 phba->fc_stat.elsRcvADISC++; 3871 phba->fc_stat.elsRcvADISC++;
3474 if (phba->hba_state < LPFC_DISC_AUTH) { 3872 if (vport->port_state < LPFC_DISC_AUTH) {
3475 rjt_err = 1; 3873 rjt_err = LSRJT_UNABLE_TPC;
3476 break; 3874 break;
3477 } 3875 }
3478 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC); 3876 lpfc_disc_state_machine(vport, ndlp, elsiocb,
3877 NLP_EVT_RCV_ADISC);
3479 break; 3878 break;
3480 case ELS_CMD_PDISC: 3879 case ELS_CMD_PDISC:
3880 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3881 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
3882 did, vport->port_state, ndlp->nlp_flag);
3883
3481 phba->fc_stat.elsRcvPDISC++; 3884 phba->fc_stat.elsRcvPDISC++;
3482 if (phba->hba_state < LPFC_DISC_AUTH) { 3885 if (vport->port_state < LPFC_DISC_AUTH) {
3483 rjt_err = 1; 3886 rjt_err = LSRJT_UNABLE_TPC;
3484 break; 3887 break;
3485 } 3888 }
3486 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC); 3889 lpfc_disc_state_machine(vport, ndlp, elsiocb,
3890 NLP_EVT_RCV_PDISC);
3487 break; 3891 break;
3488 case ELS_CMD_FARPR: 3892 case ELS_CMD_FARPR:
3893 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3894 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
3895 did, vport->port_state, ndlp->nlp_flag);
3896
3489 phba->fc_stat.elsRcvFARPR++; 3897 phba->fc_stat.elsRcvFARPR++;
3490 lpfc_els_rcv_farpr(phba, elsiocb, ndlp); 3898 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
3491 break; 3899 break;
3492 case ELS_CMD_FARP: 3900 case ELS_CMD_FARP:
3901 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3902 "RCV FARP: did:x%x/ste:x%x flg:x%x",
3903 did, vport->port_state, ndlp->nlp_flag);
3904
3493 phba->fc_stat.elsRcvFARP++; 3905 phba->fc_stat.elsRcvFARP++;
3494 lpfc_els_rcv_farp(phba, elsiocb, ndlp); 3906 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
3495 break; 3907 break;
3496 case ELS_CMD_FAN: 3908 case ELS_CMD_FAN:
3909 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3910 "RCV FAN: did:x%x/ste:x%x flg:x%x",
3911 did, vport->port_state, ndlp->nlp_flag);
3912
3497 phba->fc_stat.elsRcvFAN++; 3913 phba->fc_stat.elsRcvFAN++;
3498 lpfc_els_rcv_fan(phba, elsiocb, ndlp); 3914 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
3499 break; 3915 break;
3500 case ELS_CMD_PRLI: 3916 case ELS_CMD_PRLI:
3917 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3918 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
3919 did, vport->port_state, ndlp->nlp_flag);
3920
3501 phba->fc_stat.elsRcvPRLI++; 3921 phba->fc_stat.elsRcvPRLI++;
3502 if (phba->hba_state < LPFC_DISC_AUTH) { 3922 if (vport->port_state < LPFC_DISC_AUTH) {
3503 rjt_err = 1; 3923 rjt_err = LSRJT_UNABLE_TPC;
3504 break; 3924 break;
3505 } 3925 }
3506 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 3926 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
3507 break; 3927 break;
3508 case ELS_CMD_LIRR: 3928 case ELS_CMD_LIRR:
3929 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3930 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
3931 did, vport->port_state, ndlp->nlp_flag);
3932
3509 phba->fc_stat.elsRcvLIRR++; 3933 phba->fc_stat.elsRcvLIRR++;
3510 lpfc_els_rcv_lirr(phba, elsiocb, ndlp); 3934 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
3511 if (newnode) 3935 if (newnode)
3512 lpfc_drop_node(phba, ndlp); 3936 lpfc_drop_node(vport, ndlp);
3513 break; 3937 break;
3514 case ELS_CMD_RPS: 3938 case ELS_CMD_RPS:
3939 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3940 "RCV RPS: did:x%x/ste:x%x flg:x%x",
3941 did, vport->port_state, ndlp->nlp_flag);
3942
3515 phba->fc_stat.elsRcvRPS++; 3943 phba->fc_stat.elsRcvRPS++;
3516 lpfc_els_rcv_rps(phba, elsiocb, ndlp); 3944 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
3517 if (newnode) 3945 if (newnode)
3518 lpfc_drop_node(phba, ndlp); 3946 lpfc_drop_node(vport, ndlp);
3519 break; 3947 break;
3520 case ELS_CMD_RPL: 3948 case ELS_CMD_RPL:
3949 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3950 "RCV RPL: did:x%x/ste:x%x flg:x%x",
3951 did, vport->port_state, ndlp->nlp_flag);
3952
3521 phba->fc_stat.elsRcvRPL++; 3953 phba->fc_stat.elsRcvRPL++;
3522 lpfc_els_rcv_rpl(phba, elsiocb, ndlp); 3954 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
3523 if (newnode) 3955 if (newnode)
3524 lpfc_drop_node(phba, ndlp); 3956 lpfc_drop_node(vport, ndlp);
3525 break; 3957 break;
3526 case ELS_CMD_RNID: 3958 case ELS_CMD_RNID:
3959 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3960 "RCV RNID: did:x%x/ste:x%x flg:x%x",
3961 did, vport->port_state, ndlp->nlp_flag);
3962
3527 phba->fc_stat.elsRcvRNID++; 3963 phba->fc_stat.elsRcvRNID++;
3528 lpfc_els_rcv_rnid(phba, elsiocb, ndlp); 3964 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
3529 if (newnode) 3965 if (newnode)
3530 lpfc_drop_node(phba, ndlp); 3966 lpfc_drop_node(vport, ndlp);
3531 break; 3967 break;
3532 default: 3968 default:
3969 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3970 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
3971 cmd, did, vport->port_state);
3972
3533 /* Unsupported ELS command, reject */ 3973 /* Unsupported ELS command, reject */
3534 rjt_err = 1; 3974 rjt_err = LSRJT_INVALID_CMD;
3535 3975
3536 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 3976 /* Unknown ELS command <elsCmd> received from NPORT <did> */
3537 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 3977 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3538 "%d:0115 Unknown ELS command x%x received from " 3978 "%d (%d):0115 Unknown ELS command x%x "
3539 "NPORT x%x\n", phba->brd_no, cmd, did); 3979 "received from NPORT x%x\n",
3980 phba->brd_no, vport->vpi, cmd, did);
3540 if (newnode) 3981 if (newnode)
3541 lpfc_drop_node(phba, ndlp); 3982 lpfc_drop_node(vport, ndlp);
3542 break; 3983 break;
3543 } 3984 }
3544 3985
3545 /* check if need to LS_RJT received ELS cmd */ 3986 /* check if need to LS_RJT received ELS cmd */
3546 if (rjt_err) { 3987 if (rjt_err) {
3547 stat.un.b.lsRjtRsvd0 = 0; 3988 memset(&stat, 0, sizeof(stat));
3548 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3989 stat.un.b.lsRjtRsnCode = rjt_err;
3549 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 3990 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3550 stat.un.b.vendorUnique = 0; 3991 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
3551 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp); 3992 NULL);
3993 if (newnode)
3994 lpfc_drop_node(vport, ndlp);
3995 }
3996
3997 return;
3998
3999dropit:
4000 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4001 "%d (%d):0111 Dropping received ELS cmd "
4002 "Data: x%x x%x x%x\n",
4003 phba->brd_no, vport ? vport->vpi : 0xffff,
4004 icmd->ulpStatus, icmd->un.ulpWord[4],
4005 icmd->ulpTimeout);
4006 phba->fc_stat.elsRcvDrop++;
4007}
4008
4009static struct lpfc_vport *
4010lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
4011{
4012 struct lpfc_vport *vport;
4013
4014 list_for_each_entry(vport, &phba->port_list, listentry) {
4015 if (vport->vpi == vpi)
4016 return vport;
4017 }
4018 return NULL;
4019}
4020
4021void
4022lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4023 struct lpfc_iocbq *elsiocb)
4024{
4025 struct lpfc_vport *vport = phba->pport;
4026 IOCB_t *icmd = &elsiocb->iocb;
4027 dma_addr_t paddr;
4028 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
4029 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
4030
4031 elsiocb->context2 = NULL;
4032 elsiocb->context3 = NULL;
4033
4034 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
4035 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
4036 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
4037 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
4038 phba->fc_stat.NoRcvBuf++;
4039 /* Not enough posted buffers; Try posting more buffers */
4040 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
4041 lpfc_post_buffer(phba, pring, 0, 1);
4042 return;
4043 }
4044
4045 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4046 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
4047 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
4048 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
4049 vport = phba->pport;
4050 else {
4051 uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
4052 vport = lpfc_find_vport_by_vpid(phba, vpi);
4053 }
4054 }
4055 /* If there are no BDEs associated
4056 * with this IOCB, there is nothing to do.
4057 */
4058 if (icmd->ulpBdeCount == 0)
4059 return;
4060
4061 /* type of ELS cmd is first 32bit word
4062 * in packet
4063 */
4064 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4065 elsiocb->context2 = bdeBuf1;
4066 } else {
4067 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
4068 icmd->un.cont64[0].addrLow);
4069 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
4070 paddr);
3552 } 4071 }
3553 4072
4073 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
4074 /*
4075 * The different unsolicited event handlers would tell us
4076 * if they are done with "mp" by setting context2 to NULL.
4077 */
3554 lpfc_nlp_put(elsiocb->context1); 4078 lpfc_nlp_put(elsiocb->context1);
3555 elsiocb->context1 = NULL; 4079 elsiocb->context1 = NULL;
3556 if (elsiocb->context2) { 4080 if (elsiocb->context2) {
3557 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4081 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
3558 kfree(mp); 4082 elsiocb->context2 = NULL;
3559 } 4083 }
3560dropit: 4084
3561 /* check if need to drop received ELS cmd */ 4085 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
3562 if (drop_cmd == 1) { 4086 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
4087 icmd->ulpBdeCount == 2) {
4088 elsiocb->context2 = bdeBuf2;
4089 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
4090 /* free mp if we are done with it */
4091 if (elsiocb->context2) {
4092 lpfc_in_buf_free(phba, elsiocb->context2);
4093 elsiocb->context2 = NULL;
4094 }
4095 }
4096}
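
The unsolicited-ELS entry point above resolves the receiving vport from the VPI carried in the IOCB: VPI 0xffff maps to the physical port, a known VPI maps to the matching vport, and an unknown VPI leaves the frame with no owner so it is dropped. Below is a minimal userspace sketch of that dispatch, assuming a plain singly linked list in place of phba->port_list; the struct and field names here are illustrative, not the driver's.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_vport {                  /* stand-in for struct lpfc_vport */
	uint16_t vpi;
	const char *name;
	struct demo_vport *next;
};

/* Walk the port list for a matching VPI; NULL when nothing matches. */
static struct demo_vport *find_vport_by_vpi(struct demo_vport *head,
					    uint16_t vpi)
{
	struct demo_vport *v;

	for (v = head; v != NULL; v = v->next)
		if (v->vpi == vpi)
			return v;
	return NULL;
}

/* VPI 0xffff means "physical port"; an unknown VPI resolves to NULL and
 * the frame is dropped, mirroring the lookup done for received ELS IOCBs.
 */
static struct demo_vport *resolve_rcv_vport(struct demo_vport *ports,
					    struct demo_vport *physical,
					    uint16_t rcv_vpi)
{
	if (rcv_vpi == 0xffff)
		return physical;
	return find_vport_by_vpi(ports, rcv_vpi);
}

int main(void)
{
	struct demo_vport v2   = { 2, "vport2",   NULL };
	struct demo_vport v1   = { 1, "vport1",   &v2  };
	struct demo_vport phys = { 0, "physical", &v1  };
	uint16_t vpis[] = { 2, 0xffff, 9 };
	size_t i;

	for (i = 0; i < 3; i++) {
		struct demo_vport *v = resolve_rcv_vport(&phys, &phys, vpis[i]);
		printf("vpi 0x%x -> %s\n", vpis[i], v ? v->name : "(dropped)");
	}
	return 0;
}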
4097
4098void
4099lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
4100{
4101 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
4102
4103 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4104 if (!ndlp) {
4105 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4106 if (!ndlp) {
4107 if (phba->fc_topology == TOPOLOGY_LOOP) {
4108 lpfc_disc_start(vport);
4109 return;
4110 }
4111 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4112 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4113 "%d (%d):0251 NameServer login: no memory\n",
4114 phba->brd_no, vport->vpi);
4115 return;
4116 }
4117 lpfc_nlp_init(vport, ndlp, NameServer_DID);
4118 ndlp->nlp_type |= NLP_FABRIC;
4119 }
4120
4121 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4122
4123 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
4124 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3563 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 4125 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3564 "%d:0111 Dropping received ELS cmd " 4126 "%d (%d):0252 Cannot issue NameServer login\n",
3565 "Data: x%x x%x x%x\n", phba->brd_no, 4127 phba->brd_no, vport->vpi);
3566 icmd->ulpStatus, icmd->un.ulpWord[4], 4128 return;
3567 icmd->ulpTimeout); 4129 }
3568 phba->fc_stat.elsRcvDrop++; 4130
4131 if (phba->cfg_fdmi_on) {
4132 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
4133 GFP_KERNEL);
4134 if (ndlp_fdmi) {
4135 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
4136 ndlp_fdmi->nlp_type |= NLP_FABRIC;
4137 ndlp_fdmi->nlp_state =
4138 NLP_STE_PLOGI_ISSUE;
4139 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
4140 0);
4141 }
4142 }
4143 return;
4144}
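
lpfc_do_scr_ns_plogi() looks the NameServer node up by its well-known DID and only allocates and initializes a fresh node when none exists, then marks it PLOGI_ISSUE and sends the login; the FDMI node gets the same treatment when cfg_fdmi_on is set. A tiny find-or-create sketch of that lookup-by-DID step follows, assuming a fixed-size table instead of the driver's node list; only the NameServer well-known address 0xFFFFFC is taken from the standard, everything else is illustrative.

#include <stdio.h>
#include <stdint.h>

#define NAMESERVER_DID 0xfffffcu     /* well-known Fibre Channel NameServer address */
#define MAX_NODES 8

struct node {
	uint32_t did;
	int state;                   /* 0 = unused, 1 = PLOGI issued */
};

static struct node nodes[MAX_NODES];
static int node_count;

static struct node *find_node_by_did(uint32_t did)
{
	int i;

	for (i = 0; i < node_count; i++)
		if (nodes[i].did == did)
			return &nodes[i];
	return NULL;
}

/* Find-or-create, then mark the node as having a PLOGI in flight. */
static struct node *ns_plogi(uint32_t did)
{
	struct node *n = find_node_by_did(did);

	if (!n) {
		if (node_count == MAX_NODES)
			return NULL; /* allocation-failure path in the driver */
		n = &nodes[node_count++];
		n->did = did;
	}
	n->state = 1;
	return n;
}

int main(void)
{
	ns_plogi(NAMESERVER_DID);
	ns_plogi(NAMESERVER_DID);    /* second call reuses the existing node */
	printf("nodes allocated: %d\n", node_count);
	return 0;
}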
4145
4146static void
4147lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4148{
4149 struct lpfc_vport *vport = pmb->vport;
4150 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4151 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
4152 MAILBOX_t *mb = &pmb->mb;
4153
4154 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4155 lpfc_nlp_put(ndlp);
4156
4157 if (mb->mbxStatus) {
4158 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
4159 "%d (%d):0915 Register VPI failed: 0x%x\n",
4160 phba->brd_no, vport->vpi, mb->mbxStatus);
4161
4162 switch (mb->mbxStatus) {
4163 case 0x11: /* unsupported feature */
4164 case 0x9603: /* max_vpi exceeded */
4165 /* giving up on vport registration */
4166 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4167 spin_lock_irq(shost->host_lock);
4168 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4169 spin_unlock_irq(shost->host_lock);
4170 lpfc_can_disctmo(vport);
4171 break;
4172 default:
4173 /* Try to recover from this error */
4174 lpfc_mbx_unreg_vpi(vport);
4175 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4176 lpfc_initial_fdisc(vport);
4177 break;
4178 }
4179
4180 } else {
4181 if (vport == phba->pport)
4182 lpfc_issue_fabric_reglogin(vport);
4183 else
4184 lpfc_do_scr_ns_plogi(phba, vport);
3569 } 4185 }
4186 mempool_free(pmb, phba->mbox_mem_pool);
3570 return; 4187 return;
3571} 4188}
4189
4190void
4191lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
4192 struct lpfc_nodelist *ndlp)
4193{
4194 LPFC_MBOXQ_t *mbox;
4195
4196 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4197 if (mbox) {
4198 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
4199 mbox->vport = vport;
4200 mbox->context2 = lpfc_nlp_get(ndlp);
4201 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
4202 if (lpfc_sli_issue_mbox(phba, mbox,
4203 MBX_NOWAIT | MBX_STOP_IOCB)
4204 == MBX_NOT_FINISHED) {
4205 mempool_free(mbox, phba->mbox_mem_pool);
4206 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4207
4208 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4209
4210 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
4211 "%d (%d):0253 Register VPI: Cannot send mbox\n",
4212 phba->brd_no, vport->vpi);
4213 }
4214 } else {
4215 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4216
4217 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
4218 "%d (%d):0254 Register VPI: no memory\n",
4219 phba->brd_no, vport->vpi);
4220
4221 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4222 lpfc_nlp_put(ndlp);
4223 }
4224}
4225
4226static void
4227lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4228 struct lpfc_iocbq *rspiocb)
4229{
4230 struct lpfc_vport *vport = cmdiocb->vport;
4231 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4232 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4233 struct lpfc_nodelist *np;
4234 struct lpfc_nodelist *next_np;
4235 IOCB_t *irsp = &rspiocb->iocb;
4236 struct lpfc_iocbq *piocb;
4237
4238 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
4239 "%d (%d):0123 FDISC completes. x%x/x%x prevDID: x%x\n",
4240 phba->brd_no, vport->vpi,
4241 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
4242
4243 /* Since all FDISCs are being single threaded, we
4244 * must reset the discovery timer for ALL vports
4245 * waiting to send FDISC when one completes.
4246 */
4247 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
4248 lpfc_set_disctmo(piocb->vport);
4249 }
4250
4251 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4252 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
4253 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
4254
4255 if (irsp->ulpStatus) {
4256 /* Check for retry */
4257 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
4258 goto out;
4259
4260 /* FDISC failed */
4261 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4262 "%d (%d):0124 FDISC failed. (%d/%d)\n",
4263 phba->brd_no, vport->vpi,
4264 irsp->ulpStatus, irsp->un.ulpWord[4]);
4265
4266 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
4267 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4268
4269 lpfc_nlp_put(ndlp);
4270 /* giving up on FDISC. Cancel discovery timer */
4271 lpfc_can_disctmo(vport);
4272 } else {
4273 spin_lock_irq(shost->host_lock);
4274 vport->fc_flag |= FC_FABRIC;
4275 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
4276 vport->fc_flag |= FC_PUBLIC_LOOP;
4277 spin_unlock_irq(shost->host_lock);
4278
4279 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
4280 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
4281 if ((vport->fc_prevDID != vport->fc_myDID) &&
4282 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
4283 /* If our NportID changed, we need to ensure all
4284 * remaining NPORTs get unreg_login'ed so we can
4285 * issue unreg_vpi.
4286 */
4287 list_for_each_entry_safe(np, next_np,
4288 &vport->fc_nodes, nlp_listp) {
4289 if (np->nlp_state != NLP_STE_NPR_NODE
4290 || !(np->nlp_flag & NLP_NPR_ADISC))
4291 continue;
4292 spin_lock_irq(shost->host_lock);
4293 np->nlp_flag &= ~NLP_NPR_ADISC;
4294 spin_unlock_irq(shost->host_lock);
4295 lpfc_unreg_rpi(vport, np);
4296 }
4297 lpfc_mbx_unreg_vpi(vport);
4298 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4299 }
4300
4301 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
4302 lpfc_register_new_vport(phba, vport, ndlp);
4303 else
4304 lpfc_do_scr_ns_plogi(phba, vport);
4305
4306 lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */
4307 }
4308
4309out:
4310 lpfc_els_free_iocb(phba, cmdiocb);
4311}
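
Because FDISCs to the fabric are issued one at a time, the completion handler above restarts the discovery timer for every vport still queued on phba->fabric_iocb_list, so a waiting vport's timer cannot expire while it waits its turn. A simplified, single-threaded sketch of that "refresh every waiter" step, assuming plain arrays in place of the driver's IOCB list; the 30-second deadline is illustrative, not the driver's timeout.

#include <stdio.h>
#include <time.h>

#define NUM_WAITERS   3
#define DISC_TMO_SECS 30             /* illustrative discovery timeout */

struct waiter {
	const char *name;
	time_t disc_deadline;        /* stand-in for the per-vport discovery timer */
};

/* On one FDISC completion, push every waiter's deadline out again. */
static void refresh_disc_timers(struct waiter *w, int n, time_t now)
{
	int i;

	for (i = 0; i < n; i++)
		w[i].disc_deadline = now + DISC_TMO_SECS;
}

int main(void)
{
	time_t now = time(NULL);
	struct waiter waiters[NUM_WAITERS] = {
		{ "vport1", now + 5 }, { "vport2", now + 12 }, { "vport3", now + 20 },
	};
	int i;

	refresh_disc_timers(waiters, NUM_WAITERS, now);
	for (i = 0; i < NUM_WAITERS; i++)
		printf("%s deadline pushed to now+%lds\n", waiters[i].name,
		       (long)(waiters[i].disc_deadline - now));
	return 0;
}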
4312
4313int
4314lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4315 uint8_t retry)
4316{
4317 struct lpfc_hba *phba = vport->phba;
4318 IOCB_t *icmd;
4319 struct lpfc_iocbq *elsiocb;
4320 struct serv_parm *sp;
4321 uint8_t *pcmd;
4322 uint16_t cmdsize;
4323 int did = ndlp->nlp_DID;
4324 int rc;
4325
4326 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
4327 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
4328 ELS_CMD_FDISC);
4329 if (!elsiocb) {
4330 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4331
4332 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4333 "%d (%d):0255 Issue FDISC: no IOCB\n",
4334 phba->brd_no, vport->vpi);
4335 return 1;
4336 }
4337
4338 icmd = &elsiocb->iocb;
4339 icmd->un.elsreq64.myID = 0;
4340 icmd->un.elsreq64.fl = 1;
4341
4342 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
4343 icmd->ulpCt_h = 1;
4344 icmd->ulpCt_l = 0;
4345
4346 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4347 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
4348 pcmd += sizeof(uint32_t); /* CSP Word 1 */
4349 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
4350 sp = (struct serv_parm *) pcmd;
4351 /* Setup CSPs accordingly for Fabric */
4352 sp->cmn.e_d_tov = 0;
4353 sp->cmn.w2.r_a_tov = 0;
4354 sp->cls1.classValid = 0;
4355 sp->cls2.seqDelivery = 1;
4356 sp->cls3.seqDelivery = 1;
4357
4358 pcmd += sizeof(uint32_t); /* CSP Word 2 */
4359 pcmd += sizeof(uint32_t); /* CSP Word 3 */
4360 pcmd += sizeof(uint32_t); /* CSP Word 4 */
4361 pcmd += sizeof(uint32_t); /* Port Name */
4362 memcpy(pcmd, &vport->fc_portname, 8);
4363 pcmd += sizeof(uint32_t); /* Node Name */
4364 pcmd += sizeof(uint32_t); /* Node Name */
4365 memcpy(pcmd, &vport->fc_nodename, 8);
4366
4367 lpfc_set_disctmo(vport);
4368
4369 phba->fc_stat.elsXmitFDISC++;
4370 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
4371
4372 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4373 "Issue FDISC: did:x%x",
4374 did, 0, 0);
4375
4376 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
4377 if (rc == IOCB_ERROR) {
4378 lpfc_els_free_iocb(phba, elsiocb);
4379 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4380
4381 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4382 "%d (%d):0256 Issue FDISC: Cannot send IOCB\n",
4383 phba->brd_no, vport->vpi);
4384
4385 return 1;
4386 }
4387 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
4388 vport->port_state = LPFC_FDISC;
4389 return 0;
4390}
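
The FDISC payload built above is the ELS command word followed by a copy of the physical port's service parameters, with the timeout and class-validity fields adjusted for fabric login and the vport's own WWPN/WWNN patched in before the request is handed to the fabric IOCB scheduler. A rough standalone sketch of that buffer layout follows; the structure shape, offsets, and the command-word constant are simplified stand-ins, not the wire format from lpfc_hw.h.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DEMO_ELS_CMD_FDISC 0x51000000u  /* illustrative command word value */

struct demo_name { uint8_t wwn[8]; };   /* stand-in for struct lpfc_name   */

struct demo_sparm {                      /* heavily simplified serv_parm    */
	uint32_t csp[4];                 /* common service parameters       */
	struct demo_name port_name;
	struct demo_name node_name;
	uint32_t class_params[8];
};

int main(void)
{
	uint8_t payload[sizeof(uint32_t) + sizeof(struct demo_sparm)];
	uint8_t *p = payload;
	uint32_t cmd = DEMO_ELS_CMD_FDISC;

	/* Template: the physical port's service parameters. */
	struct demo_sparm sp = { .csp = { 0x20200000u, 0, 0, 0 } };
	struct demo_name my_wwpn = { { 0x10, 0, 0, 0, 0xc9, 0x12, 0x34, 0x56 } };
	struct demo_name my_wwnn = { { 0x20, 0, 0, 0, 0xc9, 0x12, 0x34, 0x56 } };

	sp.csp[1] = 0;                   /* clear timeout fields for fabric login */
	sp.port_name = my_wwpn;          /* patch in the vport's own WWPN         */
	sp.node_name = my_wwnn;          /* ...and WWNN                           */

	memcpy(p, &cmd, sizeof(cmd));    /* word 0: the ELS command               */
	p += sizeof(cmd);
	memcpy(p, &sp, sizeof(sp));      /* followed by the service parameters    */

	printf("FDISC payload: %zu bytes\n", sizeof(payload));
	return 0;
}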
4391
4392static void
4393lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4394 struct lpfc_iocbq *rspiocb)
4395{
4396 struct lpfc_vport *vport = cmdiocb->vport;
4397 IOCB_t *irsp;
4398
4399 irsp = &rspiocb->iocb;
4400 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4401 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
4402 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
4403
4404 lpfc_els_free_iocb(phba, cmdiocb);
4405 vport->unreg_vpi_cmpl = VPORT_ERROR;
4406}
4407
4408int
4409lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4410{
4411 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4412 struct lpfc_hba *phba = vport->phba;
4413 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4414 IOCB_t *icmd;
4415 struct lpfc_iocbq *elsiocb;
4416 uint8_t *pcmd;
4417 uint16_t cmdsize;
4418
4419 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
4420 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
4421 ELS_CMD_LOGO);
4422 if (!elsiocb)
4423 return 1;
4424
4425 icmd = &elsiocb->iocb;
4426 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4427 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
4428 pcmd += sizeof(uint32_t);
4429
4430 /* Fill in LOGO payload */
4431 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
4432 pcmd += sizeof(uint32_t);
4433 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
4434
4435 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4436 "Issue LOGO npiv did:x%x flg:x%x",
4437 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4438
4439 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
4440 spin_lock_irq(shost->host_lock);
4441 ndlp->nlp_flag |= NLP_LOGO_SND;
4442 spin_unlock_irq(shost->host_lock);
4443 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
4444 spin_lock_irq(shost->host_lock);
4445 ndlp->nlp_flag &= ~NLP_LOGO_SND;
4446 spin_unlock_irq(shost->host_lock);
4447 lpfc_els_free_iocb(phba, elsiocb);
4448 return 1;
4449 }
4450 return 0;
4451}
4452
4453void
4454lpfc_fabric_block_timeout(unsigned long ptr)
4455{
4456 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4457 unsigned long iflags;
4458 uint32_t tmo_posted;
4459 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
4460 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
4461 if (!tmo_posted)
4462 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
4463 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
4464
4465 if (!tmo_posted) {
4466 spin_lock_irqsave(&phba->hbalock, iflags);
4467 if (phba->work_wait)
4468 lpfc_worker_wake_up(phba);
4469 spin_unlock_irqrestore(&phba->hbalock, iflags);
4470 }
4471}
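
The timer handler above only posts WORKER_FABRIC_BLOCK_TMO (and wakes the worker) if that event is not already pending, so repeated timer fires collapse into a single unit of deferred work. A minimal sketch of the "post once, wake once" idiom, assuming a plain atomic flag word in place of work_port_events and a counter in place of the real wakeup.

#include <stdio.h>
#include <stdatomic.h>

#define DEMO_WORKER_FABRIC_BLOCK_TMO 0x1u   /* illustrative event bit */

static atomic_uint work_events;             /* stand-in for work_port_events     */
static int wakeups;                         /* times the worker thread was woken */

/* Set the event bit; wake the worker only if we were the first to set it. */
static void post_fabric_block_tmo(void)
{
	unsigned int prev = atomic_fetch_or(&work_events,
					    DEMO_WORKER_FABRIC_BLOCK_TMO);

	if (!(prev & DEMO_WORKER_FABRIC_BLOCK_TMO))
		wakeups++;                  /* lpfc_worker_wake_up() in the driver */
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)             /* five timer fires before the worker runs */
		post_fabric_block_tmo();

	printf("events=0x%x, worker woken %d time(s)\n",
	       atomic_load(&work_events), wakeups);
	return 0;
}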
4472
4473static void
4474lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
4475{
4476 struct lpfc_iocbq *iocb;
4477 unsigned long iflags;
4478 int ret;
4479 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4480 IOCB_t *cmd;
4481
4482repeat:
4483 iocb = NULL;
4484 spin_lock_irqsave(&phba->hbalock, iflags);
4485 /* Post any pending iocb to the SLI layer */
4486 if (atomic_read(&phba->fabric_iocb_count) == 0) {
4487 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
4488 list);
4489 if (iocb)
4490 atomic_inc(&phba->fabric_iocb_count);
4491 }
4492 spin_unlock_irqrestore(&phba->hbalock, iflags);
4493 if (iocb) {
4494 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4495 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4496 iocb->iocb_flag |= LPFC_IO_FABRIC;
4497
4498 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
4499 "Fabric sched1: ste:x%x",
4500 iocb->vport->port_state, 0, 0);
4501
4502 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4503
4504 if (ret == IOCB_ERROR) {
4505 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4506 iocb->fabric_iocb_cmpl = NULL;
4507 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
4508 cmd = &iocb->iocb;
4509 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4510 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4511 iocb->iocb_cmpl(phba, iocb, iocb);
4512
4513 atomic_dec(&phba->fabric_iocb_count);
4514 goto repeat;
4515 }
4516 }
4517
4518 return;
4519}
4520
4521void
4522lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
4523{
4524 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4525
4526 lpfc_resume_fabric_iocbs(phba);
4527 return;
4528}
4529
4530static void
4531lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
4532{
4533 int blocked;
4534
4535 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4536 /* Start a timer to unblock fabric
4537 * iocbs after 100ms
4538 */
4539 if (!blocked)
4540 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
4541
4542 return;
4543}
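
lpfc_block_fabric_iocbs() uses test_and_set_bit() so that only the first caller to block fabric traffic arms the 100 ms unblock timer; later callers while the window is open leave the timer alone. A small userspace analogue of that one-shot throttle, assuming a bool flag and a monotonic deadline in place of the kernel bit operations and mod_timer().

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static bool fabric_blocked;          /* stand-in for the blocked bit            */
static struct timespec unblock_at;   /* stand-in for phba->fabric_block_timer   */

/* Returns the previous value of the flag, like test_and_set_bit(). */
static bool test_and_set_blocked(void)
{
	bool was = fabric_blocked;

	fabric_blocked = true;
	return was;
}

static void block_fabric_iocbs(void)
{
	if (!test_and_set_blocked()) {
		/* First blocker arms the ~100 ms unblock "timer". */
		clock_gettime(CLOCK_MONOTONIC, &unblock_at);
		unblock_at.tv_nsec += 100L * 1000 * 1000;
		if (unblock_at.tv_nsec >= 1000000000L) {
			unblock_at.tv_sec++;
			unblock_at.tv_nsec -= 1000000000L;
		}
		printf("fabric blocked, unblock timer armed\n");
	} else {
		printf("already blocked, timer left alone\n");
	}
}

int main(void)
{
	block_fabric_iocbs();   /* arms the timer      */
	block_fabric_iocbs();   /* no-op while blocked */
	return 0;
}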
4544
4545static void
4546lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4547 struct lpfc_iocbq *rspiocb)
4548{
4549 struct ls_rjt stat;
4550
4551 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
4552 BUG();
4553
4554 switch (rspiocb->iocb.ulpStatus) {
4555 case IOSTAT_NPORT_RJT:
4556 case IOSTAT_FABRIC_RJT:
4557 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
4558 lpfc_block_fabric_iocbs(phba);
4559 }
4560 break;
4561
4562 case IOSTAT_NPORT_BSY:
4563 case IOSTAT_FABRIC_BSY:
4564 lpfc_block_fabric_iocbs(phba);
4565 break;
4566
4567 case IOSTAT_LS_RJT:
4568 stat.un.lsRjtError =
4569 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
4570 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
4571 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
4572 lpfc_block_fabric_iocbs(phba);
4573 break;
4574 }
4575
4576 if (atomic_read(&phba->fabric_iocb_count) == 0)
4577 BUG();
4578
4579 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
4580 cmdiocb->fabric_iocb_cmpl = NULL;
4581 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
4582 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
4583
4584 atomic_dec(&phba->fabric_iocb_count);
4585 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
4586 /* Post any pending iocbs to HBA */
4587 lpfc_resume_fabric_iocbs(phba);
4588 }
4589}
4590
4591int
4592lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4593{
4594 unsigned long iflags;
4595 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4596 int ready;
4597 int ret;
4598
4599 if (atomic_read(&phba->fabric_iocb_count) > 1)
4600 BUG();
4601
4602 spin_lock_irqsave(&phba->hbalock, iflags);
4603 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
4604 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4605
4606 spin_unlock_irqrestore(&phba->hbalock, iflags);
4607 if (ready) {
4608 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4609 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4610 iocb->iocb_flag |= LPFC_IO_FABRIC;
4611
4612 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
4613 "Fabric sched2: ste:x%x",
4614 iocb->vport->port_state, 0, 0);
4615
4616 atomic_inc(&phba->fabric_iocb_count);
4617 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4618
4619 if (ret == IOCB_ERROR) {
4620 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4621 iocb->fabric_iocb_cmpl = NULL;
4622 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
4623 atomic_dec(&phba->fabric_iocb_count);
4624 }
4625 } else {
4626 spin_lock_irqsave(&phba->hbalock, iflags);
4627 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
4628 spin_unlock_irqrestore(&phba->hbalock, iflags);
4629 ret = IOCB_SUCCESS;
4630 }
4631 return ret;
4632}
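
lpfc_issue_fabric_iocb() enforces "at most one fabric ELS outstanding": when the counter is zero and fabric traffic is not blocked the command is issued immediately, otherwise it is parked on fabric_iocb_list for the resume path to pick up after the current command completes. A single-threaded sketch of that gate, assuming a tiny FIFO in place of the driver's list; the locking the real code does under phba->hbalock is omitted.

#include <stdio.h>
#include <stdbool.h>

#define QMAX 8

static int  outstanding;             /* counterpart of fabric_iocb_count */
static bool blocked;                 /* counterpart of the blocked bit   */
static int  queue[QMAX], qhead, qtail;

static void issue_now(int cmd)
{
	outstanding++;
	printf("issued fabric cmd %d (outstanding=%d)\n", cmd, outstanding);
}

/* Issue immediately only if nothing is outstanding and we are not blocked. */
static void issue_fabric_cmd(int cmd)
{
	if (outstanding == 0 && !blocked) {
		issue_now(cmd);
	} else {
		queue[qtail++ % QMAX] = cmd;
		printf("queued fabric cmd %d\n", cmd);
	}
}

/* Completion path: drop the count, then resume one queued command. */
static void complete_fabric_cmd(void)
{
	outstanding--;
	if (!blocked && qhead != qtail)
		issue_now(queue[qhead++ % QMAX]);
}

int main(void)
{
	issue_fabric_cmd(1);    /* goes out immediately      */
	issue_fabric_cmd(2);    /* queued behind cmd 1       */
	issue_fabric_cmd(3);    /* queued behind cmd 2       */
	complete_fabric_cmd();  /* cmd 1 done, cmd 2 issued  */
	complete_fabric_cmd();  /* cmd 2 done, cmd 3 issued  */
	return 0;
}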
4633
4634
4635void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
4636{
4637 LIST_HEAD(completions);
4638 struct lpfc_hba *phba = vport->phba;
4639 struct lpfc_iocbq *tmp_iocb, *piocb;
4640 IOCB_t *cmd;
4641
4642 spin_lock_irq(&phba->hbalock);
4643 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
4644 list) {
4645
4646 if (piocb->vport != vport)
4647 continue;
4648
4649 list_move_tail(&piocb->list, &completions);
4650 }
4651 spin_unlock_irq(&phba->hbalock);
4652
4653 while (!list_empty(&completions)) {
4654 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4655 list_del_init(&piocb->list);
4656
4657 cmd = &piocb->iocb;
4658 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4659 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4660 (piocb->iocb_cmpl) (phba, piocb, piocb);
4661 }
4662}
4663
4664void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
4665{
4666 LIST_HEAD(completions);
4667 struct lpfc_hba *phba = ndlp->vport->phba;
4668 struct lpfc_iocbq *tmp_iocb, *piocb;
4669 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4670 IOCB_t *cmd;
4671
4672 spin_lock_irq(&phba->hbalock);
4673 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
4674 list) {
4675 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
4676
4677 list_move_tail(&piocb->list, &completions);
4678 }
4679 }
4680 spin_unlock_irq(&phba->hbalock);
4681
4682 while (!list_empty(&completions)) {
4683 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4684 list_del_init(&piocb->list);
4685
4686 cmd = &piocb->iocb;
4687 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4688 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4689 (piocb->iocb_cmpl) (phba, piocb, piocb);
4690 }
4691}
4692
4693void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
4694{
4695 LIST_HEAD(completions);
4696 struct lpfc_iocbq *piocb;
4697 IOCB_t *cmd;
4698
4699 spin_lock_irq(&phba->hbalock);
4700 list_splice_init(&phba->fabric_iocb_list, &completions);
4701 spin_unlock_irq(&phba->hbalock);
4702
4703 while (!list_empty(&completions)) {
4704 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4705 list_del_init(&piocb->list);
4706
4707 cmd = &piocb->iocb;
4708 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4709 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4710 (piocb->iocb_cmpl) (phba, piocb, piocb);
4711 }
4712}
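
All of the fabric abort helpers here follow the same shape: move the victim IOCBs onto a private completions list while holding the lock, drop the lock, then run each completion with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED so callbacks never execute under phba->hbalock. A compact userspace sketch of that splice-then-complete pattern, assuming a pthread mutex and a singly linked list; the status value is a placeholder, not the driver's error code.

#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

struct cmd {
	int id;
	int status;                  /* set to an "aborted" value before completion */
	struct cmd *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *pending;          /* stand-in for phba->fabric_iocb_list */

static void complete_cmd(struct cmd *c)
{
	printf("cmd %d completed with status %d\n", c->id, c->status);
}

static void abort_all_pending(void)
{
	struct cmd *victims;

	/* Splice the whole pending list off while holding the lock... */
	pthread_mutex_lock(&lock);
	victims = pending;
	pending = NULL;
	pthread_mutex_unlock(&lock);

	/* ...then complete each entry with an aborted status, lock dropped. */
	while (victims) {
		struct cmd *c = victims;

		victims = c->next;
		c->status = -1;      /* plays the role of LOCAL_REJECT/SLI_ABORTED */
		complete_cmd(c);
	}
}

int main(void)
{
	static struct cmd c2 = { 2, 0, NULL };
	static struct cmd c1 = { 1, 0, &c2 };

	pending = &c1;
	abort_all_pending();
	return 0;
}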
4713
4714
4715void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
4716{
4717 LIST_HEAD(completions);
4718 struct lpfc_iocbq *tmp_iocb, *piocb;
4719 IOCB_t *cmd;
4720 struct lpfc_nodelist *ndlp;
4721
4722 spin_lock_irq(&phba->hbalock);
4723 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
4724 list) {
4725
4726 cmd = &piocb->iocb;
4727 ndlp = (struct lpfc_nodelist *) piocb->context1;
4728 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
4729 ndlp != NULL &&
4730 ndlp->nlp_DID == Fabric_DID)
4731 list_move_tail(&piocb->list, &completions);
4732 }
4733 spin_unlock_irq(&phba->hbalock);
4734
4735 while (!list_empty(&completions)) {
4736 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4737 list_del_init(&piocb->list);
4738
4739 cmd = &piocb->iocb;
4740 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4741 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4742 (piocb->iocb_cmpl) (phba, piocb, piocb);
4743 }
4744}
4745
4746
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 61caa8d379e2..f2f4639eab59 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -36,6 +36,8 @@
36#include "lpfc.h" 36#include "lpfc.h"
37#include "lpfc_logmsg.h" 37#include "lpfc_logmsg.h"
38#include "lpfc_crtn.h" 38#include "lpfc_crtn.h"
39#include "lpfc_vport.h"
40#include "lpfc_debugfs.h"
39 41
40/* AlpaArray for assignment of scsid for scan-down and bind_method */ 42/* AlpaArray for assignment of scsid for scan-down and bind_method */
41static uint8_t lpfcAlpaArray[] = { 43static uint8_t lpfcAlpaArray[] = {
@@ -54,7 +56,7 @@ static uint8_t lpfcAlpaArray[] = {
54 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 56 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
55}; 57};
56 58
57static void lpfc_disc_timeout_handler(struct lpfc_hba *); 59static void lpfc_disc_timeout_handler(struct lpfc_vport *);
58 60
59void 61void
60lpfc_terminate_rport_io(struct fc_rport *rport) 62lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -74,14 +76,16 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
74 return; 76 return;
75 } 77 }
76 78
77 phba = ndlp->nlp_phba; 79 phba = ndlp->vport->phba;
80
81 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
82 "rport terminate: sid:x%x did:x%x flg:x%x",
83 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
78 84
79 spin_lock_irq(phba->host->host_lock);
80 if (ndlp->nlp_sid != NLP_NO_SID) { 85 if (ndlp->nlp_sid != NLP_NO_SID) {
81 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 86 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
82 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 87 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
83 } 88 }
84 spin_unlock_irq(phba->host->host_lock);
85 89
86 return; 90 return;
87} 91}
@@ -94,105 +98,213 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
94{ 98{
95 struct lpfc_rport_data *rdata; 99 struct lpfc_rport_data *rdata;
96 struct lpfc_nodelist * ndlp; 100 struct lpfc_nodelist * ndlp;
97 uint8_t *name; 101 struct lpfc_vport *vport;
98 int warn_on = 0; 102 struct lpfc_hba *phba;
99 struct lpfc_hba *phba; 103 struct completion devloss_compl;
104 struct lpfc_work_evt *evtp;
100 105
101 rdata = rport->dd_data; 106 rdata = rport->dd_data;
102 ndlp = rdata->pnode; 107 ndlp = rdata->pnode;
103 108
104 if (!ndlp) { 109 if (!ndlp) {
105 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) 110 if (rport->scsi_target_id != -1) {
106 printk(KERN_ERR "Cannot find remote node" 111 printk(KERN_ERR "Cannot find remote node"
107 " for rport in dev_loss_tmo_callbk x%x\n", 112 " for rport in dev_loss_tmo_callbk x%x\n",
108 rport->port_id); 113 rport->port_id);
114 }
109 return; 115 return;
110 } 116 }
111 117
112 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) 118 vport = ndlp->vport;
119 phba = vport->phba;
120
121 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
122 "rport devlosscb: sid:x%x did:x%x flg:x%x",
123 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
124
125 init_completion(&devloss_compl);
126 evtp = &ndlp->dev_loss_evt;
127
128 if (!list_empty(&evtp->evt_listp))
129 return;
130
131 spin_lock_irq(&phba->hbalock);
132 evtp->evt_arg1 = ndlp;
133 evtp->evt_arg2 = &devloss_compl;
134 evtp->evt = LPFC_EVT_DEV_LOSS;
135 list_add_tail(&evtp->evt_listp, &phba->work_list);
136 if (phba->work_wait)
137 wake_up(phba->work_wait);
138
139 spin_unlock_irq(&phba->hbalock);
140
141 wait_for_completion(&devloss_compl);
142
143 return;
144}
145
146/*
147 * This function is called from the worker thread when dev_loss_tmo
 148 * expires.
149 */
150void
151lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
152{
153 struct lpfc_rport_data *rdata;
154 struct fc_rport *rport;
155 struct lpfc_vport *vport;
156 struct lpfc_hba *phba;
157 uint8_t *name;
158 int warn_on = 0;
159
160 rport = ndlp->rport;
161
162 if (!rport)
163 return;
164
165 rdata = rport->dd_data;
166 name = (uint8_t *) &ndlp->nlp_portname;
167 vport = ndlp->vport;
168 phba = vport->phba;
169
170 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
171 "rport devlosstmo:did:x%x type:x%x id:x%x",
172 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
173
174 if (!(vport->load_flag & FC_UNLOADING) &&
175 ndlp->nlp_state == NLP_STE_MAPPED_NODE)
113 return; 176 return;
114 177
115 name = (uint8_t *)&ndlp->nlp_portname; 178 if (ndlp->nlp_type & NLP_FABRIC) {
116 phba = ndlp->nlp_phba; 179 int put_node;
180 int put_rport;
117 181
118 spin_lock_irq(phba->host->host_lock); 182 /* We will clean up these Nodes in linkup */
183 put_node = rdata->pnode != NULL;
184 put_rport = ndlp->rport != NULL;
185 rdata->pnode = NULL;
186 ndlp->rport = NULL;
187 if (put_node)
188 lpfc_nlp_put(ndlp);
189 if (put_rport)
190 put_device(&rport->dev);
191 return;
192 }
119 193
120 if (ndlp->nlp_sid != NLP_NO_SID) { 194 if (ndlp->nlp_sid != NLP_NO_SID) {
121 warn_on = 1; 195 warn_on = 1;
122 /* flush the target */ 196 /* flush the target */
123 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 197 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
124 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 198 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
125 } 199 }
126 if (phba->fc_flag & FC_UNLOADING) 200 if (vport->load_flag & FC_UNLOADING)
127 warn_on = 0; 201 warn_on = 0;
128 202
129 spin_unlock_irq(phba->host->host_lock);
130
131 if (warn_on) { 203 if (warn_on) {
132 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 204 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
133 "%d:0203 Devloss timeout on " 205 "%d (%d):0203 Devloss timeout on "
134 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 206 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
135 "NPort x%x Data: x%x x%x x%x\n", 207 "NPort x%x Data: x%x x%x x%x\n",
136 phba->brd_no, 208 phba->brd_no, vport->vpi,
137 *name, *(name+1), *(name+2), *(name+3), 209 *name, *(name+1), *(name+2), *(name+3),
138 *(name+4), *(name+5), *(name+6), *(name+7), 210 *(name+4), *(name+5), *(name+6), *(name+7),
139 ndlp->nlp_DID, ndlp->nlp_flag, 211 ndlp->nlp_DID, ndlp->nlp_flag,
140 ndlp->nlp_state, ndlp->nlp_rpi); 212 ndlp->nlp_state, ndlp->nlp_rpi);
141 } else { 213 } else {
142 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 214 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
143 "%d:0204 Devloss timeout on " 215 "%d (%d):0204 Devloss timeout on "
144 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 216 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
145 "NPort x%x Data: x%x x%x x%x\n", 217 "NPort x%x Data: x%x x%x x%x\n",
146 phba->brd_no, 218 phba->brd_no, vport->vpi,
147 *name, *(name+1), *(name+2), *(name+3), 219 *name, *(name+1), *(name+2), *(name+3),
148 *(name+4), *(name+5), *(name+6), *(name+7), 220 *(name+4), *(name+5), *(name+6), *(name+7),
149 ndlp->nlp_DID, ndlp->nlp_flag, 221 ndlp->nlp_DID, ndlp->nlp_flag,
150 ndlp->nlp_state, ndlp->nlp_rpi); 222 ndlp->nlp_state, ndlp->nlp_rpi);
151 } 223 }
152 224
153 if (!(phba->fc_flag & FC_UNLOADING) && 225 if (!(vport->load_flag & FC_UNLOADING) &&
154 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 226 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
155 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 227 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
156 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 228 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
157 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM); 229 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
158 else { 230 else {
231 int put_node;
232 int put_rport;
233
234 put_node = rdata->pnode != NULL;
235 put_rport = ndlp->rport != NULL;
159 rdata->pnode = NULL; 236 rdata->pnode = NULL;
160 ndlp->rport = NULL; 237 ndlp->rport = NULL;
161 lpfc_nlp_put(ndlp); 238 if (put_node)
162 put_device(&rport->dev); 239 lpfc_nlp_put(ndlp);
240 if (put_rport)
241 put_device(&rport->dev);
163 } 242 }
243}
244
164 245
246void
247lpfc_worker_wake_up(struct lpfc_hba *phba)
248{
249 wake_up(phba->work_wait);
165 return; 250 return;
166} 251}
167 252
168static void 253static void
169lpfc_work_list_done(struct lpfc_hba * phba) 254lpfc_work_list_done(struct lpfc_hba *phba)
170{ 255{
171 struct lpfc_work_evt *evtp = NULL; 256 struct lpfc_work_evt *evtp = NULL;
172 struct lpfc_nodelist *ndlp; 257 struct lpfc_nodelist *ndlp;
258 struct lpfc_vport *vport;
173 int free_evt; 259 int free_evt;
174 260
175 spin_lock_irq(phba->host->host_lock); 261 spin_lock_irq(&phba->hbalock);
176 while(!list_empty(&phba->work_list)) { 262 while (!list_empty(&phba->work_list)) {
177 list_remove_head((&phba->work_list), evtp, typeof(*evtp), 263 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
178 evt_listp); 264 evt_listp);
179 spin_unlock_irq(phba->host->host_lock); 265 spin_unlock_irq(&phba->hbalock);
180 free_evt = 1; 266 free_evt = 1;
181 switch (evtp->evt) { 267 switch (evtp->evt) {
268 case LPFC_EVT_DEV_LOSS_DELAY:
269 free_evt = 0; /* evt is part of ndlp */
270 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
271 vport = ndlp->vport;
272 if (!vport)
273 break;
274
275 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
276 "rport devlossdly:did:x%x flg:x%x",
277 ndlp->nlp_DID, ndlp->nlp_flag, 0);
278
279 if (!(vport->load_flag & FC_UNLOADING) &&
280 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
281 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
282 lpfc_disc_state_machine(vport, ndlp, NULL,
283 NLP_EVT_DEVICE_RM);
284 }
285 break;
182 case LPFC_EVT_ELS_RETRY: 286 case LPFC_EVT_ELS_RETRY:
183 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 287 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
184 lpfc_els_retry_delay_handler(ndlp); 288 lpfc_els_retry_delay_handler(ndlp);
289 free_evt = 0; /* evt is part of ndlp */
290 break;
291 case LPFC_EVT_DEV_LOSS:
292 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
293 lpfc_nlp_get(ndlp);
294 lpfc_dev_loss_tmo_handler(ndlp);
185 free_evt = 0; 295 free_evt = 0;
296 complete((struct completion *)(evtp->evt_arg2));
297 lpfc_nlp_put(ndlp);
186 break; 298 break;
187 case LPFC_EVT_ONLINE: 299 case LPFC_EVT_ONLINE:
188 if (phba->hba_state < LPFC_LINK_DOWN) 300 if (phba->link_state < LPFC_LINK_DOWN)
189 *(int *)(evtp->evt_arg1) = lpfc_online(phba); 301 *(int *) (evtp->evt_arg1) = lpfc_online(phba);
190 else 302 else
191 *(int *)(evtp->evt_arg1) = 0; 303 *(int *) (evtp->evt_arg1) = 0;
192 complete((struct completion *)(evtp->evt_arg2)); 304 complete((struct completion *)(evtp->evt_arg2));
193 break; 305 break;
194 case LPFC_EVT_OFFLINE_PREP: 306 case LPFC_EVT_OFFLINE_PREP:
195 if (phba->hba_state >= LPFC_LINK_DOWN) 307 if (phba->link_state >= LPFC_LINK_DOWN)
196 lpfc_offline_prep(phba); 308 lpfc_offline_prep(phba);
197 *(int *)(evtp->evt_arg1) = 0; 309 *(int *)(evtp->evt_arg1) = 0;
198 complete((struct completion *)(evtp->evt_arg2)); 310 complete((struct completion *)(evtp->evt_arg2));
@@ -218,33 +330,31 @@ lpfc_work_list_done(struct lpfc_hba * phba)
218 case LPFC_EVT_KILL: 330 case LPFC_EVT_KILL:
219 lpfc_offline(phba); 331 lpfc_offline(phba);
220 *(int *)(evtp->evt_arg1) 332 *(int *)(evtp->evt_arg1)
221 = (phba->stopped) ? 0 : lpfc_sli_brdkill(phba); 333 = (phba->pport->stopped)
334 ? 0 : lpfc_sli_brdkill(phba);
222 lpfc_unblock_mgmt_io(phba); 335 lpfc_unblock_mgmt_io(phba);
223 complete((struct completion *)(evtp->evt_arg2)); 336 complete((struct completion *)(evtp->evt_arg2));
224 break; 337 break;
225 } 338 }
226 if (free_evt) 339 if (free_evt)
227 kfree(evtp); 340 kfree(evtp);
228 spin_lock_irq(phba->host->host_lock); 341 spin_lock_irq(&phba->hbalock);
229 } 342 }
230 spin_unlock_irq(phba->host->host_lock); 343 spin_unlock_irq(&phba->hbalock);
231 344
232} 345}
233 346
234static void 347void
235lpfc_work_done(struct lpfc_hba * phba) 348lpfc_work_done(struct lpfc_hba *phba)
236{ 349{
237 struct lpfc_sli_ring *pring; 350 struct lpfc_sli_ring *pring;
238 int i; 351 uint32_t ha_copy, status, control, work_port_events;
239 uint32_t ha_copy; 352 struct lpfc_vport *vport;
240 uint32_t control;
241 uint32_t work_hba_events;
242 353
243 spin_lock_irq(phba->host->host_lock); 354 spin_lock_irq(&phba->hbalock);
244 ha_copy = phba->work_ha; 355 ha_copy = phba->work_ha;
245 phba->work_ha = 0; 356 phba->work_ha = 0;
246 work_hba_events=phba->work_hba_events; 357 spin_unlock_irq(&phba->hbalock);
247 spin_unlock_irq(phba->host->host_lock);
248 358
249 if (ha_copy & HA_ERATT) 359 if (ha_copy & HA_ERATT)
250 lpfc_handle_eratt(phba); 360 lpfc_handle_eratt(phba);
@@ -255,66 +365,111 @@ lpfc_work_done(struct lpfc_hba * phba)
255 if (ha_copy & HA_LATT) 365 if (ha_copy & HA_LATT)
256 lpfc_handle_latt(phba); 366 lpfc_handle_latt(phba);
257 367
258 if (work_hba_events & WORKER_DISC_TMO) 368 spin_lock_irq(&phba->hbalock);
259 lpfc_disc_timeout_handler(phba); 369 list_for_each_entry(vport, &phba->port_list, listentry) {
260 370 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
261 if (work_hba_events & WORKER_ELS_TMO) 371
262 lpfc_els_timeout_handler(phba); 372 if (!scsi_host_get(shost)) {
263 373 continue;
264 if (work_hba_events & WORKER_MBOX_TMO) 374 }
265 lpfc_mbox_timeout_handler(phba); 375 spin_unlock_irq(&phba->hbalock);
266 376 work_port_events = vport->work_port_events;
267 if (work_hba_events & WORKER_FDMI_TMO) 377
268 lpfc_fdmi_tmo_handler(phba); 378 if (work_port_events & WORKER_DISC_TMO)
269 379 lpfc_disc_timeout_handler(vport);
270 spin_lock_irq(phba->host->host_lock); 380
271 phba->work_hba_events &= ~work_hba_events; 381 if (work_port_events & WORKER_ELS_TMO)
272 spin_unlock_irq(phba->host->host_lock); 382 lpfc_els_timeout_handler(vport);
273 383
274 for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) { 384 if (work_port_events & WORKER_HB_TMO)
275 pring = &phba->sli.ring[i]; 385 lpfc_hb_timeout_handler(phba);
276 if ((ha_copy & HA_RXATT) 386
277 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { 387 if (work_port_events & WORKER_MBOX_TMO)
278 if (pring->flag & LPFC_STOP_IOCB_MASK) { 388 lpfc_mbox_timeout_handler(phba);
279 pring->flag |= LPFC_DEFERRED_RING_EVENT; 389
280 } else { 390 if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
281 lpfc_sli_handle_slow_ring_event(phba, pring, 391 lpfc_unblock_fabric_iocbs(phba);
282 (ha_copy & 392
283 HA_RXMASK)); 393 if (work_port_events & WORKER_FDMI_TMO)
284 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 394 lpfc_fdmi_timeout_handler(vport);
285 } 395
286 /* 396 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
287 * Turn on Ring interrupts 397 lpfc_ramp_down_queue_handler(phba);
288 */ 398
289 spin_lock_irq(phba->host->host_lock); 399 if (work_port_events & WORKER_RAMP_UP_QUEUE)
290 control = readl(phba->HCregaddr); 400 lpfc_ramp_up_queue_handler(phba);
291 control |= (HC_R0INT_ENA << i); 401
402 spin_lock_irq(&vport->work_port_lock);
403 vport->work_port_events &= ~work_port_events;
404 spin_unlock_irq(&vport->work_port_lock);
405 scsi_host_put(shost);
406 spin_lock_irq(&phba->hbalock);
407 }
408 spin_unlock_irq(&phba->hbalock);
409
410 pring = &phba->sli.ring[LPFC_ELS_RING];
411 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
412 status >>= (4*LPFC_ELS_RING);
413 if ((status & HA_RXMASK)
414 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
415 if (pring->flag & LPFC_STOP_IOCB_MASK) {
416 pring->flag |= LPFC_DEFERRED_RING_EVENT;
417 } else {
418 lpfc_sli_handle_slow_ring_event(phba, pring,
419 (status &
420 HA_RXMASK));
421 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
422 }
423 /*
424 * Turn on Ring interrupts
425 */
426 spin_lock_irq(&phba->hbalock);
427 control = readl(phba->HCregaddr);
428 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
429 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
292 writel(control, phba->HCregaddr); 430 writel(control, phba->HCregaddr);
293 readl(phba->HCregaddr); /* flush */ 431 readl(phba->HCregaddr); /* flush */
294 spin_unlock_irq(phba->host->host_lock);
295 } 432 }
433 spin_unlock_irq(&phba->hbalock);
296 } 434 }
297 435 lpfc_work_list_done(phba);
298 lpfc_work_list_done (phba);
299
300} 436}
301 437
302static int 438static int
303check_work_wait_done(struct lpfc_hba *phba) { 439check_work_wait_done(struct lpfc_hba *phba)
440{
441 struct lpfc_vport *vport;
442 struct lpfc_sli_ring *pring;
443 int rc = 0;
444
445 spin_lock_irq(&phba->hbalock);
446 list_for_each_entry(vport, &phba->port_list, listentry) {
447 if (vport->work_port_events) {
448 rc = 1;
449 goto exit;
450 }
451 }
304 452
305 spin_lock_irq(phba->host->host_lock); 453 if (phba->work_ha || (!list_empty(&phba->work_list)) ||
306 if (phba->work_ha ||
307 phba->work_hba_events ||
308 (!list_empty(&phba->work_list)) ||
309 kthread_should_stop()) { 454 kthread_should_stop()) {
310 spin_unlock_irq(phba->host->host_lock); 455 rc = 1;
311 return 1; 456 goto exit;
312 } else {
313 spin_unlock_irq(phba->host->host_lock);
314 return 0;
315 } 457 }
458
459 pring = &phba->sli.ring[LPFC_ELS_RING];
460 if (pring->flag & LPFC_DEFERRED_RING_EVENT)
461 rc = 1;
462exit:
463 if (rc)
464 phba->work_found++;
465 else
466 phba->work_found = 0;
467
468 spin_unlock_irq(&phba->hbalock);
469 return rc;
316} 470}
317 471
472
318int 473int
319lpfc_do_work(void *p) 474lpfc_do_work(void *p)
320{ 475{
@@ -324,11 +479,13 @@ lpfc_do_work(void *p)
324 479
325 set_user_nice(current, -20); 480 set_user_nice(current, -20);
326 phba->work_wait = &work_waitq; 481 phba->work_wait = &work_waitq;
482 phba->work_found = 0;
327 483
328 while (1) { 484 while (1) {
329 485
330 rc = wait_event_interruptible(work_waitq, 486 rc = wait_event_interruptible(work_waitq,
331 check_work_wait_done(phba)); 487 check_work_wait_done(phba));
488
332 BUG_ON(rc); 489 BUG_ON(rc);
333 490
334 if (kthread_should_stop()) 491 if (kthread_should_stop())
@@ -336,6 +493,17 @@ lpfc_do_work(void *p)
336 493
337 lpfc_work_done(phba); 494 lpfc_work_done(phba);
338 495
 496 /* If there is a lot of slow ring work, like during link up
497 * check_work_wait_done() may cause this thread to not give
498 * up the CPU for very long periods of time. This may cause
499 * soft lockups or other problems. To avoid these situations
500 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
501 * consecutive iterations.
502 */
503 if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
504 phba->work_found = 0;
505 schedule();
506 }
339 } 507 }
340 phba->work_wait = NULL; 508 phba->work_wait = NULL;
341 return 0; 509 return 0;
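
The hunk above makes the worker thread give the CPU back after LPFC_MAX_WORKER_ITERATION consecutive busy passes, since during link-up the wait condition can stay true long enough to trigger soft lockups. A stripped-down sketch of that "count consecutive busy iterations, then yield" loop, with sched_yield() standing in for schedule(); the iteration cap and the pretend work source are illustrative.

#include <stdio.h>
#include <stdbool.h>
#include <sched.h>

#define DEMO_MAX_WORKER_ITERATION 4  /* illustrative cap, not the driver's value */

static int work_found;               /* consecutive iterations that found work */

static bool work_pending(int iteration)
{
	return iteration < 10;       /* pretend work stays pending for a while */
}

int main(void)
{
	int i;

	for (i = 0; i < 12; i++) {
		if (!work_pending(i)) {
			work_found = 0;          /* an idle pass resets the streak */
			continue;
		}

		work_found++;
		printf("iteration %d: did work (streak %d)\n", i, work_found);

		if (work_found >= DEMO_MAX_WORKER_ITERATION) {
			work_found = 0;
			sched_yield();           /* like schedule() in the worker */
			printf("iteration %d: yielded CPU\n", i);
		}
	}
	return 0;
}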
@@ -347,16 +515,17 @@ lpfc_do_work(void *p)
347 * embedding it in the IOCB. 515 * embedding it in the IOCB.
348 */ 516 */
349int 517int
350lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2, 518lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
351 uint32_t evt) 519 uint32_t evt)
352{ 520{
353 struct lpfc_work_evt *evtp; 521 struct lpfc_work_evt *evtp;
522 unsigned long flags;
354 523
355 /* 524 /*
356 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will 525 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
357 * be queued to worker thread for processing 526 * be queued to worker thread for processing
358 */ 527 */
359 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL); 528 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
360 if (!evtp) 529 if (!evtp)
361 return 0; 530 return 0;
362 531
@@ -364,136 +533,210 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
364 evtp->evt_arg2 = arg2; 533 evtp->evt_arg2 = arg2;
365 evtp->evt = evt; 534 evtp->evt = evt;
366 535
367 spin_lock_irq(phba->host->host_lock); 536 spin_lock_irqsave(&phba->hbalock, flags);
368 list_add_tail(&evtp->evt_listp, &phba->work_list); 537 list_add_tail(&evtp->evt_listp, &phba->work_list);
369 if (phba->work_wait) 538 if (phba->work_wait)
370 wake_up(phba->work_wait); 539 lpfc_worker_wake_up(phba);
371 spin_unlock_irq(phba->host->host_lock); 540 spin_unlock_irqrestore(&phba->hbalock, flags);
372 541
373 return 1; 542 return 1;
374} 543}
375 544
376int 545void
377lpfc_linkdown(struct lpfc_hba *phba) 546lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
378{ 547{
379 struct lpfc_sli *psli; 548 struct lpfc_hba *phba = vport->phba;
380 struct lpfc_nodelist *ndlp, *next_ndlp; 549 struct lpfc_nodelist *ndlp, *next_ndlp;
381 LPFC_MBOXQ_t *mb; 550 int rc;
382 int rc;
383 551
384 psli = &phba->sli; 552 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
385 /* sysfs or selective reset may call this routine to clean up */ 553 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
386 if (phba->hba_state >= LPFC_LINK_DOWN) { 554 continue;
387 if (phba->hba_state == LPFC_LINK_DOWN)
388 return 0;
389 555
390 spin_lock_irq(phba->host->host_lock); 556 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
391 phba->hba_state = LPFC_LINK_DOWN; 557 lpfc_unreg_rpi(vport, ndlp);
392 spin_unlock_irq(phba->host->host_lock); 558
559 /* Leave Fabric nodes alone on link down */
560 if (!remove && ndlp->nlp_type & NLP_FABRIC)
561 continue;
562 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
563 remove
564 ? NLP_EVT_DEVICE_RM
565 : NLP_EVT_DEVICE_RECOVERY);
393 } 566 }
567 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
568 lpfc_mbx_unreg_vpi(vport);
569 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
570 }
571}
572
573static void
574lpfc_linkdown_port(struct lpfc_vport *vport)
575{
576 struct lpfc_nodelist *ndlp, *next_ndlp;
577 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
394 578
395 fc_host_post_event(phba->host, fc_get_event_number(), 579 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
396 FCH_EVT_LINKDOWN, 0);
397 580
398 /* Clean up any firmware default rpi's */ 581 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
399 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 582 "Link Down: state:x%x rtry:x%x flg:x%x",
400 lpfc_unreg_did(phba, 0xffffffff, mb); 583 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
401 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
402 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
403 == MBX_NOT_FINISHED) {
404 mempool_free( mb, phba->mbox_mem_pool);
405 }
406 }
407 584
408 /* Cleanup any outstanding RSCN activity */ 585 /* Cleanup any outstanding RSCN activity */
409 lpfc_els_flush_rscn(phba); 586 lpfc_els_flush_rscn(vport);
410 587
411 /* Cleanup any outstanding ELS commands */ 588 /* Cleanup any outstanding ELS commands */
412 lpfc_els_flush_cmd(phba); 589 lpfc_els_flush_cmd(vport);
413 590
414 /* 591 lpfc_cleanup_rpis(vport, 0);
415 * Issue a LINK DOWN event to all nodes. 592
416 */ 593 /* free any ndlp's on unused list */
417 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) { 594 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
418 /* free any ndlp's on unused list */ 595 /* free any ndlp's in unused state */
419 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 596 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
420 lpfc_drop_node(phba, ndlp); 597 lpfc_drop_node(vport, ndlp);
421 else /* otherwise, force node recovery. */ 598
422 rc = lpfc_disc_state_machine(phba, ndlp, NULL, 599 /* Turn off discovery timer if its running */
423 NLP_EVT_DEVICE_RECOVERY); 600 lpfc_can_disctmo(vport);
601}
602
603int
604lpfc_linkdown(struct lpfc_hba *phba)
605{
606 struct lpfc_vport *vport = phba->pport;
607 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
608 struct lpfc_vport *port_iterator;
609 LPFC_MBOXQ_t *mb;
610
611 if (phba->link_state == LPFC_LINK_DOWN) {
612 return 0;
613 }
614 spin_lock_irq(&phba->hbalock);
615 if (phba->link_state > LPFC_LINK_DOWN) {
616 phba->link_state = LPFC_LINK_DOWN;
617 phba->pport->fc_flag &= ~FC_LBIT;
618 }
619 spin_unlock_irq(&phba->hbalock);
620
621 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
622
623 /* Issue a LINK DOWN event to all nodes */
624 lpfc_linkdown_port(port_iterator);
625 }
626
627 /* Clean up any firmware default rpi's */
628 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
629 if (mb) {
630 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
631 mb->vport = vport;
632 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
633 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
634 == MBX_NOT_FINISHED) {
635 mempool_free(mb, phba->mbox_mem_pool);
636 }
424 } 637 }
425 638
426 /* Setup myDID for link up if we are in pt2pt mode */ 639 /* Setup myDID for link up if we are in pt2pt mode */
427 if (phba->fc_flag & FC_PT2PT) { 640 if (phba->pport->fc_flag & FC_PT2PT) {
428 phba->fc_myDID = 0; 641 phba->pport->fc_myDID = 0;
429 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 642 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
643 if (mb) {
430 lpfc_config_link(phba, mb); 644 lpfc_config_link(phba, mb);
431 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 645 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
432 if (lpfc_sli_issue_mbox 646 mb->vport = vport;
433 (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) 647 if (lpfc_sli_issue_mbox(phba, mb,
648 (MBX_NOWAIT | MBX_STOP_IOCB))
434 == MBX_NOT_FINISHED) { 649 == MBX_NOT_FINISHED) {
435 mempool_free( mb, phba->mbox_mem_pool); 650 mempool_free(mb, phba->mbox_mem_pool);
436 } 651 }
437 } 652 }
438 spin_lock_irq(phba->host->host_lock); 653 spin_lock_irq(shost->host_lock);
439 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); 654 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
440 spin_unlock_irq(phba->host->host_lock); 655 spin_unlock_irq(shost->host_lock);
441 } 656 }
442 spin_lock_irq(phba->host->host_lock);
443 phba->fc_flag &= ~FC_LBIT;
444 spin_unlock_irq(phba->host->host_lock);
445
446 /* Turn off discovery timer if its running */
447 lpfc_can_disctmo(phba);
448 657
449 /* Must process IOCBs on all rings to handle ABORTed I/Os */
450 return 0; 658 return 0;
451} 659}
452 660
453static int 661static void
454lpfc_linkup(struct lpfc_hba *phba) 662lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
455{ 663{
456 struct lpfc_nodelist *ndlp, *next_ndlp; 664 struct lpfc_nodelist *ndlp;
457 665
458 fc_host_post_event(phba->host, fc_get_event_number(), 666 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
459 FCH_EVT_LINKUP, 0); 667 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
460 668 continue;
461 spin_lock_irq(phba->host->host_lock); 669
462 phba->hba_state = LPFC_LINK_UP; 670 if (ndlp->nlp_type & NLP_FABRIC) {
463 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | 671 /* On Linkup its safe to clean up the ndlp
464 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); 672 * from Fabric connections.
465 phba->fc_flag |= FC_NDISC_ACTIVE; 673 */
466 phba->fc_ns_retry = 0; 674 if (ndlp->nlp_DID != Fabric_DID)
467 spin_unlock_irq(phba->host->host_lock); 675 lpfc_unreg_rpi(vport, ndlp);
468 676 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
469 677 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
470 if (phba->fc_flag & FC_LBIT) { 678 /* Fail outstanding IO now since device is
471 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 679 * marked for PLOGI.
472 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) { 680 */
473 if (ndlp->nlp_type & NLP_FABRIC) { 681 lpfc_unreg_rpi(vport, ndlp);
474 /*
475 * On Linkup its safe to clean up the
476 * ndlp from Fabric connections.
477 */
478 lpfc_nlp_set_state(phba, ndlp,
479 NLP_STE_UNUSED_NODE);
480 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
481 /*
482 * Fail outstanding IO now since
483 * device is marked for PLOGI.
484 */
485 lpfc_unreg_rpi(phba, ndlp);
486 }
487 }
488 } 682 }
489 } 683 }
684}
490 685
491 /* free any ndlp's on unused list */ 686static void
492 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, 687lpfc_linkup_port(struct lpfc_vport *vport)
493 nlp_listp) { 688{
689 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
690 struct lpfc_nodelist *ndlp, *next_ndlp;
691 struct lpfc_hba *phba = vport->phba;
692
693 if ((vport->load_flag & FC_UNLOADING) != 0)
694 return;
695
696 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
697 "Link Up: top:x%x speed:x%x flg:x%x",
698 phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
699
700 /* If NPIV is not enabled, only bring the physical port up */
701 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
702 (vport != phba->pport))
703 return;
704
705 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
706
707 spin_lock_irq(shost->host_lock);
708 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
709 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
710 vport->fc_flag |= FC_NDISC_ACTIVE;
711 vport->fc_ns_retry = 0;
712 spin_unlock_irq(shost->host_lock);
713
714 if (vport->fc_flag & FC_LBIT)
715 lpfc_linkup_cleanup_nodes(vport);
716
717 /* free any ndlp's in unused state */
718 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
719 nlp_listp)
494 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 720 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
495 lpfc_drop_node(phba, ndlp); 721 lpfc_drop_node(vport, ndlp);
722}
723
724static int
725lpfc_linkup(struct lpfc_hba *phba)
726{
727 struct lpfc_vport *vport;
728
729 phba->link_state = LPFC_LINK_UP;
730
731 /* Unblock fabric iocbs if they are blocked */
732 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
733 del_timer_sync(&phba->fabric_block_timer);
734
735 list_for_each_entry(vport, &phba->port_list, listentry) {
736 lpfc_linkup_port(vport);
496 } 737 }
738 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
739 lpfc_issue_clear_la(phba, phba->pport);
497 740
498 return 0; 741 return 0;
499} 742}
@@ -505,14 +748,14 @@ lpfc_linkup(struct lpfc_hba *phba)
505 * handed off to the SLI layer. 748 * handed off to the SLI layer.
506 */ 749 */
507void 750void
508lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 751lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
509{ 752{
510 struct lpfc_sli *psli; 753 struct lpfc_vport *vport = pmb->vport;
511 MAILBOX_t *mb; 754 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
755 struct lpfc_sli *psli = &phba->sli;
756 MAILBOX_t *mb = &pmb->mb;
512 uint32_t control; 757 uint32_t control;
513 758
514 psli = &phba->sli;
515 mb = &pmb->mb;
516 /* Since we don't do discovery right now, turn these off here */ 759 /* Since we don't do discovery right now, turn these off here */
517 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 760 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
518 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 761 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
@@ -522,69 +765,74 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
522 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { 765 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
523 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ 766 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
524 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 767 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
525 "%d:0320 CLEAR_LA mbxStatus error x%x hba " 768 "%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
526 "state x%x\n", 769 "state x%x\n",
527 phba->brd_no, mb->mbxStatus, phba->hba_state); 770 phba->brd_no, vport->vpi, mb->mbxStatus,
771 vport->port_state);
528 772
529 phba->hba_state = LPFC_HBA_ERROR; 773 phba->link_state = LPFC_HBA_ERROR;
530 goto out; 774 goto out;
531 } 775 }
532 776
533 if (phba->fc_flag & FC_ABORT_DISCOVERY) 777 if (vport->port_type == LPFC_PHYSICAL_PORT)
534 goto out; 778 phba->link_state = LPFC_HBA_READY;
535 779
536 phba->num_disc_nodes = 0; 780 spin_lock_irq(&phba->hbalock);
537 /* go thru NPR list and issue ELS PLOGIs */ 781 psli->sli_flag |= LPFC_PROCESS_LA;
538 if (phba->fc_npr_cnt) { 782 control = readl(phba->HCregaddr);
539 lpfc_els_disc_plogi(phba); 783 control |= HC_LAINT_ENA;
540 } 784 writel(control, phba->HCregaddr);
785 readl(phba->HCregaddr); /* flush */
786 spin_unlock_irq(&phba->hbalock);
787 return;
788
789 vport->num_disc_nodes = 0;
790 /* go thru NPR nodes and issue ELS PLOGIs */
791 if (vport->fc_npr_cnt)
792 lpfc_els_disc_plogi(vport);
541 793
542 if (!phba->num_disc_nodes) { 794 if (!vport->num_disc_nodes) {
543 spin_lock_irq(phba->host->host_lock); 795 spin_lock_irq(shost->host_lock);
544 phba->fc_flag &= ~FC_NDISC_ACTIVE; 796 vport->fc_flag &= ~FC_NDISC_ACTIVE;
545 spin_unlock_irq(phba->host->host_lock); 797 spin_unlock_irq(shost->host_lock);
546 } 798 }
547 799
548 phba->hba_state = LPFC_HBA_READY; 800 vport->port_state = LPFC_VPORT_READY;
549 801
550out: 802out:
551 /* Device Discovery completes */ 803 /* Device Discovery completes */
552 lpfc_printf_log(phba, 804 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
553 KERN_INFO, 805 "%d (%d):0225 Device Discovery completes\n",
554 LOG_DISCOVERY, 806 phba->brd_no, vport->vpi);
555 "%d:0225 Device Discovery completes\n",
556 phba->brd_no);
557 807
558 mempool_free( pmb, phba->mbox_mem_pool); 808 mempool_free(pmb, phba->mbox_mem_pool);
559 809
560 spin_lock_irq(phba->host->host_lock); 810 spin_lock_irq(shost->host_lock);
561 phba->fc_flag &= ~FC_ABORT_DISCOVERY; 811 vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
562 if (phba->fc_flag & FC_ESTABLISH_LINK) { 812 spin_unlock_irq(shost->host_lock);
563 phba->fc_flag &= ~FC_ESTABLISH_LINK;
564 }
565 spin_unlock_irq(phba->host->host_lock);
566 813
567 del_timer_sync(&phba->fc_estabtmo); 814 del_timer_sync(&phba->fc_estabtmo);
568 815
569 lpfc_can_disctmo(phba); 816 lpfc_can_disctmo(vport);
570 817
571 /* turn on Link Attention interrupts */ 818 /* turn on Link Attention interrupts */
572 spin_lock_irq(phba->host->host_lock); 819
820 spin_lock_irq(&phba->hbalock);
573 psli->sli_flag |= LPFC_PROCESS_LA; 821 psli->sli_flag |= LPFC_PROCESS_LA;
574 control = readl(phba->HCregaddr); 822 control = readl(phba->HCregaddr);
575 control |= HC_LAINT_ENA; 823 control |= HC_LAINT_ENA;
576 writel(control, phba->HCregaddr); 824 writel(control, phba->HCregaddr);
577 readl(phba->HCregaddr); /* flush */ 825 readl(phba->HCregaddr); /* flush */
578 spin_unlock_irq(phba->host->host_lock); 826 spin_unlock_irq(&phba->hbalock);
579 827
580 return; 828 return;
581} 829}
582 830
831
583static void 832static void
584lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 833lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
585{ 834{
586 struct lpfc_sli *psli = &phba->sli; 835 struct lpfc_vport *vport = pmb->vport;
587 int rc;
588 836
589 if (pmb->mb.mbxStatus) 837 if (pmb->mb.mbxStatus)
590 goto out; 838 goto out;
@@ -592,154 +840,139 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
592 mempool_free(pmb, phba->mbox_mem_pool); 840 mempool_free(pmb, phba->mbox_mem_pool);
593 841
594 if (phba->fc_topology == TOPOLOGY_LOOP && 842 if (phba->fc_topology == TOPOLOGY_LOOP &&
595 phba->fc_flag & FC_PUBLIC_LOOP && 843 vport->fc_flag & FC_PUBLIC_LOOP &&
596 !(phba->fc_flag & FC_LBIT)) { 844 !(vport->fc_flag & FC_LBIT)) {
597 /* Need to wait for FAN - use discovery timer 845 /* Need to wait for FAN - use discovery timer
598 * for timeout. hba_state is identically 846 * for timeout. port_state is identically
599 * LPFC_LOCAL_CFG_LINK while waiting for FAN 847 * LPFC_LOCAL_CFG_LINK while waiting for FAN
600 */ 848 */
601 lpfc_set_disctmo(phba); 849 lpfc_set_disctmo(vport);
602 return; 850 return;
603 } 851 }
604 852
605 /* Start discovery by sending a FLOGI. hba_state is identically 853 /* Start discovery by sending a FLOGI. port_state is identically
606 * LPFC_FLOGI while waiting for FLOGI cmpl 854 * LPFC_FLOGI while waiting for FLOGI cmpl
607 */ 855 */
608 phba->hba_state = LPFC_FLOGI; 856 if (vport->port_state != LPFC_FLOGI) {
609 lpfc_set_disctmo(phba); 857 vport->port_state = LPFC_FLOGI;
610 lpfc_initial_flogi(phba); 858 lpfc_set_disctmo(vport);
859 lpfc_initial_flogi(vport);
860 }
611 return; 861 return;
612 862
613out: 863out:
614 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 864 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
615 "%d:0306 CONFIG_LINK mbxStatus error x%x " 865 "%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
616 "HBA state x%x\n", 866 "HBA state x%x\n",
617 phba->brd_no, pmb->mb.mbxStatus, phba->hba_state); 867 phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
868 vport->port_state);
618 869
619 lpfc_linkdown(phba); 870 mempool_free(pmb, phba->mbox_mem_pool);
620 871
621 phba->hba_state = LPFC_HBA_ERROR; 872 lpfc_linkdown(phba);
622 873
623 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 874 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
624 "%d:0200 CONFIG_LINK bad hba state x%x\n", 875 "%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
625 phba->brd_no, phba->hba_state); 876 phba->brd_no, vport->vpi, vport->port_state);
626 877
627 lpfc_clear_la(phba, pmb); 878 lpfc_issue_clear_la(phba, vport);
628 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
629 rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
630 if (rc == MBX_NOT_FINISHED) {
631 mempool_free(pmb, phba->mbox_mem_pool);
632 lpfc_disc_flush_list(phba);
633 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
634 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
635 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
636 phba->hba_state = LPFC_HBA_READY;
637 }
638 return; 879 return;
639} 880}
640 881
641static void 882static void
642lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 883lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
643{ 884{
644 struct lpfc_sli *psli = &phba->sli;
645 MAILBOX_t *mb = &pmb->mb; 885 MAILBOX_t *mb = &pmb->mb;
646 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 886 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
887 struct lpfc_vport *vport = pmb->vport;
647 888
648 889
649 /* Check for error */ 890 /* Check for error */
650 if (mb->mbxStatus) { 891 if (mb->mbxStatus) {
651 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ 892 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
652 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 893 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
653 "%d:0319 READ_SPARAM mbxStatus error x%x " 894 "%d (%d):0319 READ_SPARAM mbxStatus error x%x "
654 "hba state x%x>\n", 895 "hba state x%x>\n",
655 phba->brd_no, mb->mbxStatus, phba->hba_state); 896 phba->brd_no, vport->vpi, mb->mbxStatus,
897 vport->port_state);
656 898
657 lpfc_linkdown(phba); 899 lpfc_linkdown(phba);
658 phba->hba_state = LPFC_HBA_ERROR;
659 goto out; 900 goto out;
660 } 901 }
661 902
662 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt, 903 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
663 sizeof (struct serv_parm)); 904 sizeof (struct serv_parm));
664 if (phba->cfg_soft_wwnn) 905 if (phba->cfg_soft_wwnn)
665 u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn); 906 u64_to_wwn(phba->cfg_soft_wwnn,
907 vport->fc_sparam.nodeName.u.wwn);
666 if (phba->cfg_soft_wwpn) 908 if (phba->cfg_soft_wwpn)
667 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); 909 u64_to_wwn(phba->cfg_soft_wwpn,
668 memcpy((uint8_t *) & phba->fc_nodename, 910 vport->fc_sparam.portName.u.wwn);
669 (uint8_t *) & phba->fc_sparam.nodeName, 911 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
670 sizeof (struct lpfc_name)); 912 sizeof(vport->fc_nodename));
671 memcpy((uint8_t *) & phba->fc_portname, 913 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
672 (uint8_t *) & phba->fc_sparam.portName, 914 sizeof(vport->fc_portname));
673 sizeof (struct lpfc_name)); 915 if (vport->port_type == LPFC_PHYSICAL_PORT) {
916 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
917 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
918 }
919
674 lpfc_mbuf_free(phba, mp->virt, mp->phys); 920 lpfc_mbuf_free(phba, mp->virt, mp->phys);
675 kfree(mp); 921 kfree(mp);
676 mempool_free( pmb, phba->mbox_mem_pool); 922 mempool_free(pmb, phba->mbox_mem_pool);
677 return; 923 return;
678 924
679out: 925out:
680 pmb->context1 = NULL; 926 pmb->context1 = NULL;
681 lpfc_mbuf_free(phba, mp->virt, mp->phys); 927 lpfc_mbuf_free(phba, mp->virt, mp->phys);
682 kfree(mp); 928 kfree(mp);
683 if (phba->hba_state != LPFC_CLEAR_LA) { 929 lpfc_issue_clear_la(phba, vport);
684 lpfc_clear_la(phba, pmb); 930 mempool_free(pmb, phba->mbox_mem_pool);
685 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
686 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
687 == MBX_NOT_FINISHED) {
688 mempool_free( pmb, phba->mbox_mem_pool);
689 lpfc_disc_flush_list(phba);
690 psli->ring[(psli->extra_ring)].flag &=
691 ~LPFC_STOP_IOCB_EVENT;
692 psli->ring[(psli->fcp_ring)].flag &=
693 ~LPFC_STOP_IOCB_EVENT;
694 psli->ring[(psli->next_ring)].flag &=
695 ~LPFC_STOP_IOCB_EVENT;
696 phba->hba_state = LPFC_HBA_READY;
697 }
698 } else {
699 mempool_free( pmb, phba->mbox_mem_pool);
700 }
701 return; 931 return;
702} 932}
703 933
704static void 934static void
705lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 935lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
706{ 936{
707 int i; 937 struct lpfc_vport *vport = phba->pport;
708 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 938 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
939 int i;
709 struct lpfc_dmabuf *mp; 940 struct lpfc_dmabuf *mp;
710 int rc; 941 int rc;
711 942
712 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 943 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
713 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 944 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
714 945
715 spin_lock_irq(phba->host->host_lock); 946 spin_lock_irq(&phba->hbalock);
716 switch (la->UlnkSpeed) { 947 switch (la->UlnkSpeed) {
717 case LA_1GHZ_LINK: 948 case LA_1GHZ_LINK:
718 phba->fc_linkspeed = LA_1GHZ_LINK; 949 phba->fc_linkspeed = LA_1GHZ_LINK;
719 break; 950 break;
720 case LA_2GHZ_LINK: 951 case LA_2GHZ_LINK:
721 phba->fc_linkspeed = LA_2GHZ_LINK; 952 phba->fc_linkspeed = LA_2GHZ_LINK;
722 break; 953 break;
723 case LA_4GHZ_LINK: 954 case LA_4GHZ_LINK:
724 phba->fc_linkspeed = LA_4GHZ_LINK; 955 phba->fc_linkspeed = LA_4GHZ_LINK;
725 break; 956 break;
726 case LA_8GHZ_LINK: 957 case LA_8GHZ_LINK:
727 phba->fc_linkspeed = LA_8GHZ_LINK; 958 phba->fc_linkspeed = LA_8GHZ_LINK;
728 break; 959 break;
729 default: 960 default:
730 phba->fc_linkspeed = LA_UNKNW_LINK; 961 phba->fc_linkspeed = LA_UNKNW_LINK;
731 break; 962 break;
732 } 963 }
733 964
734 phba->fc_topology = la->topology; 965 phba->fc_topology = la->topology;
966 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
735 967
736 if (phba->fc_topology == TOPOLOGY_LOOP) { 968 if (phba->fc_topology == TOPOLOGY_LOOP) {
737 /* Get Loop Map information */ 969 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
738 970
971 /* Get Loop Map information */
739 if (la->il) 972 if (la->il)
740 phba->fc_flag |= FC_LBIT; 973 vport->fc_flag |= FC_LBIT;
741 974
742 phba->fc_myDID = la->granted_AL_PA; 975 vport->fc_myDID = la->granted_AL_PA;
743 i = la->un.lilpBde64.tus.f.bdeSize; 976 i = la->un.lilpBde64.tus.f.bdeSize;
744 977
745 if (i == 0) { 978 if (i == 0) {
@@ -769,29 +1002,35 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
769 } 1002 }
770 /* Link Up Event ALPA map */ 1003 /* Link Up Event ALPA map */
771 lpfc_printf_log(phba, 1004 lpfc_printf_log(phba,
772 KERN_WARNING, 1005 KERN_WARNING,
773 LOG_LINK_EVENT, 1006 LOG_LINK_EVENT,
774 "%d:1304 Link Up Event " 1007 "%d:1304 Link Up Event "
775 "ALPA map Data: x%x " 1008 "ALPA map Data: x%x "
776 "x%x x%x x%x\n", 1009 "x%x x%x x%x\n",
777 phba->brd_no, 1010 phba->brd_no,
778 un.pa.wd1, un.pa.wd2, 1011 un.pa.wd1, un.pa.wd2,
779 un.pa.wd3, un.pa.wd4); 1012 un.pa.wd3, un.pa.wd4);
780 } 1013 }
781 } 1014 }
782 } 1015 }
783 } else { 1016 } else {
784 phba->fc_myDID = phba->fc_pref_DID; 1017 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
785 phba->fc_flag |= FC_LBIT; 1018 if (phba->max_vpi && phba->cfg_npiv_enable &&
1019 (phba->sli_rev == 3))
1020 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
1021 }
1022 vport->fc_myDID = phba->fc_pref_DID;
1023 vport->fc_flag |= FC_LBIT;
786 } 1024 }
787 spin_unlock_irq(phba->host->host_lock); 1025 spin_unlock_irq(&phba->hbalock);
788 1026
789 lpfc_linkup(phba); 1027 lpfc_linkup(phba);
790 if (sparam_mbox) { 1028 if (sparam_mbox) {
791 lpfc_read_sparam(phba, sparam_mbox); 1029 lpfc_read_sparam(phba, sparam_mbox, 0);
1030 sparam_mbox->vport = vport;
792 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 1031 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
793 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, 1032 rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
794 (MBX_NOWAIT | MBX_STOP_IOCB)); 1033 (MBX_NOWAIT | MBX_STOP_IOCB));
795 if (rc == MBX_NOT_FINISHED) { 1034 if (rc == MBX_NOT_FINISHED) {
796 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 1035 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
797 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1036 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -799,36 +1038,48 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
799 mempool_free(sparam_mbox, phba->mbox_mem_pool); 1038 mempool_free(sparam_mbox, phba->mbox_mem_pool);
800 if (cfglink_mbox) 1039 if (cfglink_mbox)
801 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1040 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
802 return; 1041 goto out;
803 } 1042 }
804 } 1043 }
805 1044
806 if (cfglink_mbox) { 1045 if (cfglink_mbox) {
807 phba->hba_state = LPFC_LOCAL_CFG_LINK; 1046 vport->port_state = LPFC_LOCAL_CFG_LINK;
808 lpfc_config_link(phba, cfglink_mbox); 1047 lpfc_config_link(phba, cfglink_mbox);
1048 cfglink_mbox->vport = vport;
809 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1049 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
810 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, 1050 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
811 (MBX_NOWAIT | MBX_STOP_IOCB)); 1051 (MBX_NOWAIT | MBX_STOP_IOCB));
812 if (rc == MBX_NOT_FINISHED) 1052 if (rc != MBX_NOT_FINISHED)
813 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1053 return;
1054 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
814 } 1055 }
1056out:
1057 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1058 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1059 "%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
1060 phba->brd_no, vport->vpi,
1061 vport->port_state, sparam_mbox, cfglink_mbox);
1062
1063 lpfc_issue_clear_la(phba, vport);
1064 return;
815} 1065}
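
The link-up handler above repeats a pattern used throughout this patch: a mailbox is taken from phba->mbox_mem_pool, stamped with the owning vport, given a completion routine, and issued with MBX_NOWAIT; if lpfc_sli_issue_mbox() returns MBX_NOT_FINISHED the caller frees the mailbox (and any attached DMA buffer) itself, otherwise ownership passes to the completion. A minimal user-space sketch of that ownership rule, using simplified stand-in types rather than the real LPFC_MBOXQ_t and SLI layer:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for LPFC_MBOXQ_t, struct lpfc_vport and the SLI
 * issue path; only the ownership/cleanup rule is modelled here. */
enum { MBX_SUCCESS, MBX_BUSY, MBX_NOT_FINISHED };

struct vport { int vpi; };
struct mboxq {
	struct vport *vport;
	void (*mbox_cmpl)(struct mboxq *);
};

static void cmpl_local_config_link(struct mboxq *mb)
{
	printf("CONFIG_LINK completed for vpi %d\n", mb->vport->vpi);
	free(mb);			/* completion path owns the mailbox */
}

/* Pretend SLI layer: accept the command and "complete" it immediately. */
static int sli_issue_mbox(struct mboxq *mb)
{
	if (!mb->mbox_cmpl)
		return MBX_NOT_FINISHED;
	mb->mbox_cmpl(mb);
	return MBX_SUCCESS;
}

int main(void)
{
	struct vport v = { .vpi = 1 };
	struct mboxq *mb = malloc(sizeof(*mb));

	if (!mb)
		return 1;
	mb->vport = &v;			/* new in this patch: every mailbox carries its vport */
	mb->mbox_cmpl = cmpl_local_config_link;
	if (sli_issue_mbox(mb) == MBX_NOT_FINISHED)
		free(mb);		/* issue failed: caller cleans up, as in the driver */
	return 0;
}

The vport pointer carried in the mailbox is what lets a shared completion routine such as lpfc_mbx_cmpl_read_sparam() find the right port now that several vports share one HBA.
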
816 1066
817static void 1067static void
818lpfc_mbx_issue_link_down(struct lpfc_hba *phba) { 1068lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1069{
819 uint32_t control; 1070 uint32_t control;
820 struct lpfc_sli *psli = &phba->sli; 1071 struct lpfc_sli *psli = &phba->sli;
821 1072
822 lpfc_linkdown(phba); 1073 lpfc_linkdown(phba);
823 1074
824 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 1075 /* turn on Link Attention interrupts - no CLEAR_LA needed */
825 spin_lock_irq(phba->host->host_lock); 1076 spin_lock_irq(&phba->hbalock);
826 psli->sli_flag |= LPFC_PROCESS_LA; 1077 psli->sli_flag |= LPFC_PROCESS_LA;
827 control = readl(phba->HCregaddr); 1078 control = readl(phba->HCregaddr);
828 control |= HC_LAINT_ENA; 1079 control |= HC_LAINT_ENA;
829 writel(control, phba->HCregaddr); 1080 writel(control, phba->HCregaddr);
830 readl(phba->HCregaddr); /* flush */ 1081 readl(phba->HCregaddr); /* flush */
831 spin_unlock_irq(phba->host->host_lock); 1082 spin_unlock_irq(&phba->hbalock);
832} 1083}
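
Both the link-up and link-down paths finish by re-arming Link Attention interrupts with the same read-modify-write of the Host Control register, followed by a dummy readl() so the posted write is flushed before the lock is dropped. A sketch of that sequence against an ordinary variable standing in for the iomapped HCregaddr (the bit value below is illustrative, not the real register layout):

#include <stdio.h>
#include <stdint.h>

#define HC_LAINT_ENA  0x00000020u	/* illustrative bit value, not the real layout */

/* Stand-in for the iomapped Host Control register. */
static volatile uint32_t fake_hc_reg = 0x1;

int main(void)
{
	uint32_t control;

	control = fake_hc_reg;		/* readl(phba->HCregaddr)           */
	control |= HC_LAINT_ENA;	/* enable Link Attention interrupts */
	fake_hc_reg = control;		/* writel(control, phba->HCregaddr) */
	(void)fake_hc_reg;		/* readl() again to flush the posted write */

	printf("HC register now 0x%08x\n", (unsigned)fake_hc_reg);
	return 0;
}
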
833 1084
834/* 1085/*
@@ -838,22 +1089,21 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
838 * handed off to the SLI layer. 1089 * handed off to the SLI layer.
839 */ 1090 */
840void 1091void
841lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1092lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
842{ 1093{
1094 struct lpfc_vport *vport = pmb->vport;
1095 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
843 READ_LA_VAR *la; 1096 READ_LA_VAR *la;
844 MAILBOX_t *mb = &pmb->mb; 1097 MAILBOX_t *mb = &pmb->mb;
845 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1098 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
846 1099
847 /* Check for error */ 1100 /* Check for error */
848 if (mb->mbxStatus) { 1101 if (mb->mbxStatus) {
849 lpfc_printf_log(phba, 1102 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
850 KERN_INFO,
851 LOG_LINK_EVENT,
852 "%d:1307 READ_LA mbox error x%x state x%x\n", 1103 "%d:1307 READ_LA mbox error x%x state x%x\n",
853 phba->brd_no, 1104 phba->brd_no, mb->mbxStatus, vport->port_state);
854 mb->mbxStatus, phba->hba_state);
855 lpfc_mbx_issue_link_down(phba); 1105 lpfc_mbx_issue_link_down(phba);
856 phba->hba_state = LPFC_HBA_ERROR; 1106 phba->link_state = LPFC_HBA_ERROR;
857 goto lpfc_mbx_cmpl_read_la_free_mbuf; 1107 goto lpfc_mbx_cmpl_read_la_free_mbuf;
858 } 1108 }
859 1109
@@ -861,27 +1111,26 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
861 1111
862 memcpy(&phba->alpa_map[0], mp->virt, 128); 1112 memcpy(&phba->alpa_map[0], mp->virt, 128);
863 1113
864 spin_lock_irq(phba->host->host_lock); 1114 spin_lock_irq(shost->host_lock);
865 if (la->pb) 1115 if (la->pb)
866 phba->fc_flag |= FC_BYPASSED_MODE; 1116 vport->fc_flag |= FC_BYPASSED_MODE;
867 else 1117 else
868 phba->fc_flag &= ~FC_BYPASSED_MODE; 1118 vport->fc_flag &= ~FC_BYPASSED_MODE;
869 spin_unlock_irq(phba->host->host_lock); 1119 spin_unlock_irq(shost->host_lock);
870 1120
871 if (((phba->fc_eventTag + 1) < la->eventTag) || 1121 if (((phba->fc_eventTag + 1) < la->eventTag) ||
872 (phba->fc_eventTag == la->eventTag)) { 1122 (phba->fc_eventTag == la->eventTag)) {
873 phba->fc_stat.LinkMultiEvent++; 1123 phba->fc_stat.LinkMultiEvent++;
874 if (la->attType == AT_LINK_UP) { 1124 if (la->attType == AT_LINK_UP)
875 if (phba->fc_eventTag != 0) 1125 if (phba->fc_eventTag != 0)
876 lpfc_linkdown(phba); 1126 lpfc_linkdown(phba);
877 }
878 } 1127 }
879 1128
880 phba->fc_eventTag = la->eventTag; 1129 phba->fc_eventTag = la->eventTag;
881 1130
882 if (la->attType == AT_LINK_UP) { 1131 if (la->attType == AT_LINK_UP) {
883 phba->fc_stat.LinkUp++; 1132 phba->fc_stat.LinkUp++;
884 if (phba->fc_flag & FC_LOOPBACK_MODE) { 1133 if (phba->link_flag & LS_LOOPBACK_MODE) {
885 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1134 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
886 "%d:1306 Link Up Event in loop back mode " 1135 "%d:1306 Link Up Event in loop back mode "
887 "x%x received Data: x%x x%x x%x x%x\n", 1136 "x%x received Data: x%x x%x x%x x%x\n",
@@ -903,7 +1152,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
903 "%d:1305 Link Down Event x%x received " 1152 "%d:1305 Link Down Event x%x received "
904 "Data: x%x x%x x%x\n", 1153 "Data: x%x x%x x%x\n",
905 phba->brd_no, la->eventTag, phba->fc_eventTag, 1154 phba->brd_no, la->eventTag, phba->fc_eventTag,
906 phba->hba_state, phba->fc_flag); 1155 phba->pport->port_state, vport->fc_flag);
907 lpfc_mbx_issue_link_down(phba); 1156 lpfc_mbx_issue_link_down(phba);
908 } 1157 }
909 1158
@@ -921,31 +1170,115 @@ lpfc_mbx_cmpl_read_la_free_mbuf:
921 * handed off to the SLI layer. 1170 * handed off to the SLI layer.
922 */ 1171 */
923void 1172void
924lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1173lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
925{ 1174{
926 struct lpfc_sli *psli; 1175 struct lpfc_vport *vport = pmb->vport;
927 MAILBOX_t *mb; 1176 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
928 struct lpfc_dmabuf *mp; 1177 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
929 struct lpfc_nodelist *ndlp;
930
931 psli = &phba->sli;
932 mb = &pmb->mb;
933
934 ndlp = (struct lpfc_nodelist *) pmb->context2;
935 mp = (struct lpfc_dmabuf *) (pmb->context1);
936 1178
937 pmb->context1 = NULL; 1179 pmb->context1 = NULL;
938 1180
939 /* Good status, call state machine */ 1181 /* Good status, call state machine */
940 lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); 1182 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
941 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1183 lpfc_mbuf_free(phba, mp->virt, mp->phys);
942 kfree(mp); 1184 kfree(mp);
943 mempool_free( pmb, phba->mbox_mem_pool); 1185 mempool_free(pmb, phba->mbox_mem_pool);
944 lpfc_nlp_put(ndlp); 1186 lpfc_nlp_put(ndlp);
945 1187
946 return; 1188 return;
947} 1189}
948 1190
1191static void
1192lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1193{
1194 MAILBOX_t *mb = &pmb->mb;
1195 struct lpfc_vport *vport = pmb->vport;
1196 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1197
1198 switch (mb->mbxStatus) {
1199 case 0x0011:
1200 case 0x0020:
1201 case 0x9700:
1202 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1203 "%d (%d):0911 cmpl_unreg_vpi, "
1204 "mb status = 0x%x\n",
1205 phba->brd_no, vport->vpi, mb->mbxStatus);
1206 break;
1207 }
1208 vport->unreg_vpi_cmpl = VPORT_OK;
1209 mempool_free(pmb, phba->mbox_mem_pool);
1210 /*
1211 * This shost reference might have been taken at the beginning of
1212 * lpfc_vport_delete()
1213 */
1214 if (vport->load_flag & FC_UNLOADING)
1215 scsi_host_put(shost);
1216}
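
The scsi_host_put() above pairs with a reference taken earlier in lpfc_vport_delete(): the extra reference keeps the shost alive while the UNREG_VPI mailbox is in flight and is dropped only in the completion. A toy refcount sketch of that take-before-async / drop-in-completion pattern (the helpers below are stand-ins, not the real scsi_host_get()/scsi_host_put()):

#include <stdio.h>

/* Toy reference counter standing in for the shost refcount. */
struct shost { int refs; };

static void shost_get(struct shost *s) { s->refs++; }
static void shost_put(struct shost *s)
{
	if (--s->refs == 0)
		printf("last reference gone, host freed\n");
}

static void cmpl_unreg_vpi(struct shost *s, int unloading)
{
	printf("UNREG_VPI completed\n");
	if (unloading)
		shost_put(s);		/* drop the ref taken in vport delete */
}

int main(void)
{
	struct shost host = { .refs = 1 };

	shost_get(&host);		/* taken at the start of vport delete */
	cmpl_unreg_vpi(&host, 1);	/* async mailbox completion runs later */
	shost_put(&host);		/* final reference dropped by the owner */
	return 0;
}
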
1217
1218void
1219lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1220{
1221 struct lpfc_hba *phba = vport->phba;
1222 LPFC_MBOXQ_t *mbox;
1223 int rc;
1224
1225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1226 if (!mbox)
1227 return;
1228
1229 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1230 mbox->vport = vport;
1231 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1232 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1233 if (rc == MBX_NOT_FINISHED) {
1234 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1235 "%d (%d):1800 Could not issue unreg_vpi\n",
1236 phba->brd_no, vport->vpi);
1237 mempool_free(mbox, phba->mbox_mem_pool);
1238 vport->unreg_vpi_cmpl = VPORT_ERROR;
1239 }
1240}
1241
1242static void
1243lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1244{
1245 struct lpfc_vport *vport = pmb->vport;
1246 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1247 MAILBOX_t *mb = &pmb->mb;
1248
1249 switch (mb->mbxStatus) {
1250 case 0x0011:
1251 case 0x9601:
1252 case 0x9602:
1253 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1254 "%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
1255 phba->brd_no, vport->vpi, mb->mbxStatus);
1256 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1257 spin_lock_irq(shost->host_lock);
1258 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1259 spin_unlock_irq(shost->host_lock);
1260 vport->fc_myDID = 0;
1261 goto out;
1262 }
1263
1264 vport->num_disc_nodes = 0;
1265 /* go thru NPR list and issue ELS PLOGIs */
1266 if (vport->fc_npr_cnt)
1267 lpfc_els_disc_plogi(vport);
1268
1269 if (!vport->num_disc_nodes) {
1270 spin_lock_irq(shost->host_lock);
1271 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1272 spin_unlock_irq(shost->host_lock);
1273 lpfc_can_disctmo(vport);
1274 }
1275 vport->port_state = LPFC_VPORT_READY;
1276
1277out:
1278 mempool_free(pmb, phba->mbox_mem_pool);
1279 return;
1280}
1281
949/* 1282/*
950 * This routine handles processing a Fabric REG_LOGIN mailbox 1283 * This routine handles processing a Fabric REG_LOGIN mailbox
951 * command upon completion. It is setup in the LPFC_MBOXQ 1284 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -953,20 +1286,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
953 * handed off to the SLI layer. 1286 * handed off to the SLI layer.
954 */ 1287 */
955void 1288void
956lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1289lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
957{ 1290{
958 struct lpfc_sli *psli; 1291 struct lpfc_vport *vport = pmb->vport;
959 MAILBOX_t *mb; 1292 struct lpfc_vport *next_vport;
960 struct lpfc_dmabuf *mp; 1293 MAILBOX_t *mb = &pmb->mb;
1294 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
961 struct lpfc_nodelist *ndlp; 1295 struct lpfc_nodelist *ndlp;
962 struct lpfc_nodelist *ndlp_fdmi;
963
964
965 psli = &phba->sli;
966 mb = &pmb->mb;
967
968 ndlp = (struct lpfc_nodelist *) pmb->context2; 1296 ndlp = (struct lpfc_nodelist *) pmb->context2;
969 mp = (struct lpfc_dmabuf *) (pmb->context1);
970 1297
971 pmb->context1 = NULL; 1298 pmb->context1 = NULL;
972 pmb->context2 = NULL; 1299 pmb->context2 = NULL;
@@ -977,60 +1304,46 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
977 mempool_free(pmb, phba->mbox_mem_pool); 1304 mempool_free(pmb, phba->mbox_mem_pool);
978 lpfc_nlp_put(ndlp); 1305 lpfc_nlp_put(ndlp);
979 1306
980 /* FLOGI failed, so just use loop map to make discovery list */ 1307 if (phba->fc_topology == TOPOLOGY_LOOP) {
981 lpfc_disc_list_loopmap(phba); 1308 /* FLOGI failed, use loop map to make discovery list */
1309 lpfc_disc_list_loopmap(vport);
1310
1311 /* Start discovery */
1312 lpfc_disc_start(vport);
1313 return;
1314 }
1315
1316 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1317 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1318 "%d (%d):0258 Register Fabric login error: 0x%x\n",
1319 phba->brd_no, vport->vpi, mb->mbxStatus);
982 1320
983 /* Start discovery */
984 lpfc_disc_start(phba);
985 return; 1321 return;
986 } 1322 }
987 1323
988 ndlp->nlp_rpi = mb->un.varWords[0]; 1324 ndlp->nlp_rpi = mb->un.varWords[0];
989 ndlp->nlp_type |= NLP_FABRIC; 1325 ndlp->nlp_type |= NLP_FABRIC;
990 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1326 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
991 1327
992 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ 1328 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
993 1329
994 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) { 1330 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
995 /* This NPort has been assigned an NPort_ID by the fabric as a 1331 list_for_each_entry(next_vport, &phba->port_list, listentry) {
996 * result of the completed fabric login. Issue a State Change 1332 if (next_vport->port_type == LPFC_PHYSICAL_PORT)
997 * Registration (SCR) ELS request to the fabric controller 1333 continue;
998 * (SCR_DID) so that this NPort gets RSCN events from the 1334
999 * fabric. 1335 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1000 */ 1336 lpfc_initial_fdisc(next_vport);
1001 lpfc_issue_els_scr(phba, SCR_DID, 0); 1337 else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1002 1338 lpfc_vport_set_state(vport,
1003 ndlp = lpfc_findnode_did(phba, NameServer_DID); 1339 FC_VPORT_NO_FABRIC_SUPP);
1004 if (!ndlp) { 1340 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1005 /* Allocate a new node instance. If the pool is empty, 1341 "%d (%d):0259 No NPIV Fabric "
1006 * start the discovery process and skip the Nameserver 1342 "support\n",
1007 * login process. This is attempted again later on. 1343 phba->brd_no, vport->vpi);
1008 * Otherwise, issue a Port Login (PLOGI) to NameServer.
1009 */
1010 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1011 if (!ndlp) {
1012 lpfc_disc_start(phba);
1013 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1014 kfree(mp);
1015 mempool_free(pmb, phba->mbox_mem_pool);
1016 return;
1017 } else {
1018 lpfc_nlp_init(phba, ndlp, NameServer_DID);
1019 ndlp->nlp_type |= NLP_FABRIC;
1020 }
1021 }
1022 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
1023 lpfc_issue_els_plogi(phba, NameServer_DID, 0);
1024 if (phba->cfg_fdmi_on) {
1025 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
1026 GFP_KERNEL);
1027 if (ndlp_fdmi) {
1028 lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
1029 ndlp_fdmi->nlp_type |= NLP_FABRIC;
1030 ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
1031 lpfc_issue_els_plogi(phba, FDMI_DID, 0);
1032 } 1344 }
1033 } 1345 }
1346 lpfc_do_scr_ns_plogi(phba, vport);
1034 } 1347 }
1035 1348
1036 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1349 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1046,32 +1359,36 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1046 * handed off to the SLI layer. 1359 * handed off to the SLI layer.
1047 */ 1360 */
1048void 1361void
1049lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1362lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1050{ 1363{
1051 struct lpfc_sli *psli; 1364 MAILBOX_t *mb = &pmb->mb;
1052 MAILBOX_t *mb; 1365 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1053 struct lpfc_dmabuf *mp; 1366 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1054 struct lpfc_nodelist *ndlp; 1367 struct lpfc_vport *vport = pmb->vport;
1055
1056 psli = &phba->sli;
1057 mb = &pmb->mb;
1058
1059 ndlp = (struct lpfc_nodelist *) pmb->context2;
1060 mp = (struct lpfc_dmabuf *) (pmb->context1);
1061 1368
1062 if (mb->mbxStatus) { 1369 if (mb->mbxStatus) {
1370out:
1063 lpfc_nlp_put(ndlp); 1371 lpfc_nlp_put(ndlp);
1064 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1372 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1065 kfree(mp); 1373 kfree(mp);
1066 mempool_free(pmb, phba->mbox_mem_pool); 1374 mempool_free(pmb, phba->mbox_mem_pool);
1067 lpfc_drop_node(phba, ndlp); 1375 lpfc_drop_node(vport, ndlp);
1068 1376
1069 /* RegLogin failed, so just use loop map to make discovery 1377 if (phba->fc_topology == TOPOLOGY_LOOP) {
1070 list */ 1378 /*
1071 lpfc_disc_list_loopmap(phba); 1379 * RegLogin failed, use loop map to make discovery
1380 * list
1381 */
1382 lpfc_disc_list_loopmap(vport);
1072 1383
1073 /* Start discovery */ 1384 /* Start discovery */
1074 lpfc_disc_start(phba); 1385 lpfc_disc_start(vport);
1386 return;
1387 }
1388 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1389 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1390 "%d (%d):0260 Register NameServer error: 0x%x\n",
1391 phba->brd_no, vport->vpi, mb->mbxStatus);
1075 return; 1392 return;
1076 } 1393 }
1077 1394
@@ -1079,37 +1396,43 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1079 1396
1080 ndlp->nlp_rpi = mb->un.varWords[0]; 1397 ndlp->nlp_rpi = mb->un.varWords[0];
1081 ndlp->nlp_type |= NLP_FABRIC; 1398 ndlp->nlp_type |= NLP_FABRIC;
1082 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1399 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1083 1400
1084 if (phba->hba_state < LPFC_HBA_READY) { 1401 if (vport->port_state < LPFC_VPORT_READY) {
1085 /* Link up discovery requires Fabrib registration. */ 1402 /* Link up discovery requires Fabric registration. */
1086 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID); 1403 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
1087 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN); 1404 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
1088 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID); 1405 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
1089 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID); 1406 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
1407 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
1408
1409 /* Issue SCR just before NameServer GID_FT Query */
1410 lpfc_issue_els_scr(vport, SCR_DID, 0);
1090 } 1411 }
1091 1412
1092 phba->fc_ns_retry = 0; 1413 vport->fc_ns_retry = 0;
1093 /* Good status, issue CT Request to NameServer */ 1414 /* Good status, issue CT Request to NameServer */
1094 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) { 1415 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
1095 /* Cannot issue NameServer Query, so finish up discovery */ 1416 /* Cannot issue NameServer Query, so finish up discovery */
1096 lpfc_disc_start(phba); 1417 goto out;
1097 } 1418 }
1098 1419
1099 lpfc_nlp_put(ndlp); 1420 lpfc_nlp_put(ndlp);
1100 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1421 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1101 kfree(mp); 1422 kfree(mp);
1102 mempool_free( pmb, phba->mbox_mem_pool); 1423 mempool_free(pmb, phba->mbox_mem_pool);
1103 1424
1104 return; 1425 return;
1105} 1426}
1106 1427
1107static void 1428static void
1108lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1429lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1109{ 1430{
1110 struct fc_rport *rport; 1431 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1432 struct fc_rport *rport;
1111 struct lpfc_rport_data *rdata; 1433 struct lpfc_rport_data *rdata;
1112 struct fc_rport_identifiers rport_ids; 1434 struct fc_rport_identifiers rport_ids;
1435 struct lpfc_hba *phba = vport->phba;
1113 1436
1114 /* Remote port has reappeared. Re-register w/ FC transport */ 1437 /* Remote port has reappeared. Re-register w/ FC transport */
1115 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 1438 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
@@ -1125,10 +1448,15 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1125 * registered the port. 1448 * registered the port.
1126 */ 1449 */
1127 if (ndlp->rport && ndlp->rport->dd_data && 1450 if (ndlp->rport && ndlp->rport->dd_data &&
1128 *(struct lpfc_rport_data **) ndlp->rport->dd_data) { 1451 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
1129 lpfc_nlp_put(ndlp); 1452 lpfc_nlp_put(ndlp);
1130 } 1453 }
1131 ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids); 1454
1455 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
1456 "rport add: did:x%x flg:x%x type x%x",
1457 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1458
1459 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
1132 if (!rport || !get_device(&rport->dev)) { 1460 if (!rport || !get_device(&rport->dev)) {
1133 dev_printk(KERN_WARNING, &phba->pcidev->dev, 1461 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1134 "Warning: fc_remote_port_add failed\n"); 1462 "Warning: fc_remote_port_add failed\n");
@@ -1151,25 +1479,20 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1151 fc_remote_port_rolechg(rport, rport_ids.roles); 1479 fc_remote_port_rolechg(rport, rport_ids.roles);
1152 1480
1153 if ((rport->scsi_target_id != -1) && 1481 if ((rport->scsi_target_id != -1) &&
1154 (rport->scsi_target_id < LPFC_MAX_TARGET)) { 1482 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1155 ndlp->nlp_sid = rport->scsi_target_id; 1483 ndlp->nlp_sid = rport->scsi_target_id;
1156 } 1484 }
1157
1158 return; 1485 return;
1159} 1486}
1160 1487
1161static void 1488static void
1162lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1489lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
1163{ 1490{
1164 struct fc_rport *rport = ndlp->rport; 1491 struct fc_rport *rport = ndlp->rport;
1165 struct lpfc_rport_data *rdata = rport->dd_data;
1166 1492
1167 if (rport->scsi_target_id == -1) { 1493 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
1168 ndlp->rport = NULL; 1494 "rport delete: did:x%x flg:x%x type x%x",
1169 rdata->pnode = NULL; 1495 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1170 lpfc_nlp_put(ndlp);
1171 put_device(&rport->dev);
1172 }
1173 1496
1174 fc_remote_port_delete(rport); 1497 fc_remote_port_delete(rport);
1175 1498
@@ -1177,42 +1500,46 @@ lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1177} 1500}
1178 1501
1179static void 1502static void
1180lpfc_nlp_counters(struct lpfc_hba *phba, int state, int count) 1503lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
1181{ 1504{
1182 spin_lock_irq(phba->host->host_lock); 1505 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1506
1507 spin_lock_irq(shost->host_lock);
1183 switch (state) { 1508 switch (state) {
1184 case NLP_STE_UNUSED_NODE: 1509 case NLP_STE_UNUSED_NODE:
1185 phba->fc_unused_cnt += count; 1510 vport->fc_unused_cnt += count;
1186 break; 1511 break;
1187 case NLP_STE_PLOGI_ISSUE: 1512 case NLP_STE_PLOGI_ISSUE:
1188 phba->fc_plogi_cnt += count; 1513 vport->fc_plogi_cnt += count;
1189 break; 1514 break;
1190 case NLP_STE_ADISC_ISSUE: 1515 case NLP_STE_ADISC_ISSUE:
1191 phba->fc_adisc_cnt += count; 1516 vport->fc_adisc_cnt += count;
1192 break; 1517 break;
1193 case NLP_STE_REG_LOGIN_ISSUE: 1518 case NLP_STE_REG_LOGIN_ISSUE:
1194 phba->fc_reglogin_cnt += count; 1519 vport->fc_reglogin_cnt += count;
1195 break; 1520 break;
1196 case NLP_STE_PRLI_ISSUE: 1521 case NLP_STE_PRLI_ISSUE:
1197 phba->fc_prli_cnt += count; 1522 vport->fc_prli_cnt += count;
1198 break; 1523 break;
1199 case NLP_STE_UNMAPPED_NODE: 1524 case NLP_STE_UNMAPPED_NODE:
1200 phba->fc_unmap_cnt += count; 1525 vport->fc_unmap_cnt += count;
1201 break; 1526 break;
1202 case NLP_STE_MAPPED_NODE: 1527 case NLP_STE_MAPPED_NODE:
1203 phba->fc_map_cnt += count; 1528 vport->fc_map_cnt += count;
1204 break; 1529 break;
1205 case NLP_STE_NPR_NODE: 1530 case NLP_STE_NPR_NODE:
1206 phba->fc_npr_cnt += count; 1531 vport->fc_npr_cnt += count;
1207 break; 1532 break;
1208 } 1533 }
1209 spin_unlock_irq(phba->host->host_lock); 1534 spin_unlock_irq(shost->host_lock);
1210} 1535}
1211 1536
1212static void 1537static void
1213lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 1538lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1214 int old_state, int new_state) 1539 int old_state, int new_state)
1215{ 1540{
1541 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1542
1216 if (new_state == NLP_STE_UNMAPPED_NODE) { 1543 if (new_state == NLP_STE_UNMAPPED_NODE) {
1217 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 1544 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1218 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 1545 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
@@ -1226,35 +1553,34 @@ lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1226 /* Transport interface */ 1553 /* Transport interface */
1227 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE || 1554 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
1228 old_state == NLP_STE_UNMAPPED_NODE)) { 1555 old_state == NLP_STE_UNMAPPED_NODE)) {
1229 phba->nport_event_cnt++; 1556 vport->phba->nport_event_cnt++;
1230 lpfc_unregister_remote_port(phba, ndlp); 1557 lpfc_unregister_remote_port(ndlp);
1231 } 1558 }
1232 1559
1233 if (new_state == NLP_STE_MAPPED_NODE || 1560 if (new_state == NLP_STE_MAPPED_NODE ||
1234 new_state == NLP_STE_UNMAPPED_NODE) { 1561 new_state == NLP_STE_UNMAPPED_NODE) {
1235 phba->nport_event_cnt++; 1562 vport->phba->nport_event_cnt++;
1236 /* 1563 /*
1237 * Tell the fc transport about the port, if we haven't 1564 * Tell the fc transport about the port, if we haven't
1238 * already. If we have, and it's a scsi entity, be 1565 * already. If we have, and it's a scsi entity, be
1239 * sure to unblock any attached scsi devices 1566 * sure to unblock any attached scsi devices
1240 */ 1567 */
1241 lpfc_register_remote_port(phba, ndlp); 1568 lpfc_register_remote_port(vport, ndlp);
1242 } 1569 }
1243 1570 /*
1244 /* 1571 * if we added to Mapped list, but the remote port
1245 * if we added to Mapped list, but the remote port 1572 * registration failed or assigned a target id outside
1246 * registration failed or assigned a target id outside 1573 * our presentable range - move the node to the
1247 * our presentable range - move the node to the 1574 * Unmapped List
1248 * Unmapped List 1575 */
1249 */
1250 if (new_state == NLP_STE_MAPPED_NODE && 1576 if (new_state == NLP_STE_MAPPED_NODE &&
1251 (!ndlp->rport || 1577 (!ndlp->rport ||
1252 ndlp->rport->scsi_target_id == -1 || 1578 ndlp->rport->scsi_target_id == -1 ||
1253 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { 1579 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
1254 spin_lock_irq(phba->host->host_lock); 1580 spin_lock_irq(shost->host_lock);
1255 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; 1581 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1256 spin_unlock_irq(phba->host->host_lock); 1582 spin_unlock_irq(shost->host_lock);
1257 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1583 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1258 } 1584 }
1259} 1585}
1260 1586
@@ -1280,61 +1606,74 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
1280} 1606}
1281 1607
1282void 1608void
1283lpfc_nlp_set_state(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int state) 1609lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1610 int state)
1284{ 1611{
1612 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1285 int old_state = ndlp->nlp_state; 1613 int old_state = ndlp->nlp_state;
1286 char name1[16], name2[16]; 1614 char name1[16], name2[16];
1287 1615
1288 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1616 lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE,
1289 "%d:0904 NPort state transition x%06x, %s -> %s\n", 1617 "%d (%d):0904 NPort state transition x%06x, %s -> %s\n",
1290 phba->brd_no, 1618 vport->phba->brd_no, vport->vpi,
1291 ndlp->nlp_DID, 1619 ndlp->nlp_DID,
1292 lpfc_nlp_state_name(name1, sizeof(name1), old_state), 1620 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
1293 lpfc_nlp_state_name(name2, sizeof(name2), state)); 1621 lpfc_nlp_state_name(name2, sizeof(name2), state));
1622
1623 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
1624 "node statechg did:x%x old:%d ste:%d",
1625 ndlp->nlp_DID, old_state, state);
1626
1294 if (old_state == NLP_STE_NPR_NODE && 1627 if (old_state == NLP_STE_NPR_NODE &&
1295 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 && 1628 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1296 state != NLP_STE_NPR_NODE) 1629 state != NLP_STE_NPR_NODE)
1297 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1630 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1298 if (old_state == NLP_STE_UNMAPPED_NODE) { 1631 if (old_state == NLP_STE_UNMAPPED_NODE) {
1299 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; 1632 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1300 ndlp->nlp_type &= ~NLP_FC_NODE; 1633 ndlp->nlp_type &= ~NLP_FC_NODE;
1301 } 1634 }
1302 1635
1303 if (list_empty(&ndlp->nlp_listp)) { 1636 if (list_empty(&ndlp->nlp_listp)) {
1304 spin_lock_irq(phba->host->host_lock); 1637 spin_lock_irq(shost->host_lock);
1305 list_add_tail(&ndlp->nlp_listp, &phba->fc_nodes); 1638 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1306 spin_unlock_irq(phba->host->host_lock); 1639 spin_unlock_irq(shost->host_lock);
1307 } else if (old_state) 1640 } else if (old_state)
1308 lpfc_nlp_counters(phba, old_state, -1); 1641 lpfc_nlp_counters(vport, old_state, -1);
1309 1642
1310 ndlp->nlp_state = state; 1643 ndlp->nlp_state = state;
1311 lpfc_nlp_counters(phba, state, 1); 1644 lpfc_nlp_counters(vport, state, 1);
1312 lpfc_nlp_state_cleanup(phba, ndlp, old_state, state); 1645 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
1313} 1646}
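
lpfc_nlp_set_state() keeps the per-vport discovery counters consistent by decrementing the counter of the state being left, when the node was already on fc_nodes, and incrementing the counter of the state being entered. A toy version of that bookkeeping, with one counter array standing in for the individual fc_plogi_cnt, fc_adisc_cnt, ... fields:

#include <stdio.h>

enum nlp_state { NLP_UNUSED, NLP_PLOGI, NLP_ADISC, NLP_UNMAPPED, NLP_MAPPED, NLP_NSTATES };

static int cnt[NLP_NSTATES];	/* stands in for vport->fc_plogi_cnt, fc_adisc_cnt, ... */

/* on_list mirrors "!list_empty(&ndlp->nlp_listp)" in the driver. */
static enum nlp_state set_state(enum nlp_state old, enum nlp_state next_state, int on_list)
{
	if (on_list)
		cnt[old]--;		/* leaving the old state */
	cnt[next_state]++;		/* entering the new one   */
	return next_state;
}

int main(void)
{
	enum nlp_state st = set_state(NLP_UNUSED, NLP_PLOGI, 0); /* first time on the list */
	st = set_state(st, NLP_UNMAPPED, 1);
	st = set_state(st, NLP_MAPPED, 1);
	printf("plogi=%d unmapped=%d mapped=%d\n",
	       cnt[NLP_PLOGI], cnt[NLP_UNMAPPED], cnt[NLP_MAPPED]);
	return 0;
}
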
1314 1647
1315void 1648void
1316lpfc_dequeue_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1649lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1317{ 1650{
1651 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1652
1318 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1653 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1319 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1654 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1320 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 1655 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1321 lpfc_nlp_counters(phba, ndlp->nlp_state, -1); 1656 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1322 spin_lock_irq(phba->host->host_lock); 1657 spin_lock_irq(shost->host_lock);
1323 list_del_init(&ndlp->nlp_listp); 1658 list_del_init(&ndlp->nlp_listp);
1324 spin_unlock_irq(phba->host->host_lock); 1659 spin_unlock_irq(shost->host_lock);
1325 lpfc_nlp_state_cleanup(phba, ndlp, ndlp->nlp_state, 0); 1660 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1661 NLP_STE_UNUSED_NODE);
1326} 1662}
1327 1663
1328void 1664void
1329lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1665lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1330{ 1666{
1667 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1668
1331 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1669 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1332 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1670 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1333 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 1671 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1334 lpfc_nlp_counters(phba, ndlp->nlp_state, -1); 1672 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1335 spin_lock_irq(phba->host->host_lock); 1673 spin_lock_irq(shost->host_lock);
1336 list_del_init(&ndlp->nlp_listp); 1674 list_del_init(&ndlp->nlp_listp);
1337 spin_unlock_irq(phba->host->host_lock); 1675 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
1676 spin_unlock_irq(shost->host_lock);
1338 lpfc_nlp_put(ndlp); 1677 lpfc_nlp_put(ndlp);
1339} 1678}
1340 1679
@@ -1342,11 +1681,13 @@ lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1342 * Start / ReStart rescue timer for Discovery / RSCN handling 1681 * Start / ReStart rescue timer for Discovery / RSCN handling
1343 */ 1682 */
1344void 1683void
1345lpfc_set_disctmo(struct lpfc_hba * phba) 1684lpfc_set_disctmo(struct lpfc_vport *vport)
1346{ 1685{
1686 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1687 struct lpfc_hba *phba = vport->phba;
1347 uint32_t tmo; 1688 uint32_t tmo;
1348 1689
1349 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) { 1690 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
 1350 /* For FAN, timeout should be greater than edtov */ 1691 /* For FAN, timeout should be greater than edtov */
1351 tmo = (((phba->fc_edtov + 999) / 1000) + 1); 1692 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1352 } else { 1693 } else {
@@ -1356,18 +1697,25 @@ lpfc_set_disctmo(struct lpfc_hba * phba)
1356 tmo = ((phba->fc_ratov * 3) + 3); 1697 tmo = ((phba->fc_ratov * 3) + 3);
1357 } 1698 }
1358 1699
1359 mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo); 1700
1360 spin_lock_irq(phba->host->host_lock); 1701 if (!timer_pending(&vport->fc_disctmo)) {
1361 phba->fc_flag |= FC_DISC_TMO; 1702 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1362 spin_unlock_irq(phba->host->host_lock); 1703 "set disc timer: tmo:x%x state:x%x flg:x%x",
1704 tmo, vport->port_state, vport->fc_flag);
1705 }
1706
1707 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1708 spin_lock_irq(shost->host_lock);
1709 vport->fc_flag |= FC_DISC_TMO;
1710 spin_unlock_irq(shost->host_lock);
1363 1711
1364 /* Start Discovery Timer state <hba_state> */ 1712 /* Start Discovery Timer state <hba_state> */
1365 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1713 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1366 "%d:0247 Start Discovery Timer state x%x " 1714 "%d (%d):0247 Start Discovery Timer state x%x "
1367 "Data: x%x x%lx x%x x%x\n", 1715 "Data: x%x x%lx x%x x%x\n",
1368 phba->brd_no, 1716 phba->brd_no, vport->vpi, vport->port_state, tmo,
1369 phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo, 1717 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1370 phba->fc_plogi_cnt, phba->fc_adisc_cnt); 1718 vport->fc_adisc_cnt);
1371 1719
1372 return; 1720 return;
1373} 1721}
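
The two timeout formulas in lpfc_set_disctmo() are easy to check in isolation: while waiting for FAN the timer must exceed E_D_TOV, which is kept in milliseconds and therefore rounded up to whole seconds and padded by one, otherwise it is three times R_A_TOV plus a small pad. A stand-alone check of that arithmetic with example values (2000 ms and 10 s are illustrative, not values read from the adapter):

#include <stdio.h>

/* Same arithmetic as lpfc_set_disctmo(); fc_edtov is in milliseconds,
 * fc_ratov in seconds. */
static unsigned int disc_tmo_seconds(int waiting_for_fan,
				     unsigned int fc_edtov_ms,
				     unsigned int fc_ratov_s)
{
	if (waiting_for_fan)				/* port_state == LPFC_LOCAL_CFG_LINK */
		return ((fc_edtov_ms + 999) / 1000) + 1;
	return (fc_ratov_s * 3) + 3;
}

int main(void)
{
	/* E_D_TOV of 2000 ms -> 3 s timer; R_A_TOV of 10 s -> 33 s timer */
	printf("FAN wait:  %u s\n", disc_tmo_seconds(1, 2000, 10));
	printf("discovery: %u s\n", disc_tmo_seconds(0, 2000, 10));
	return 0;
}
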
@@ -1376,23 +1724,34 @@ lpfc_set_disctmo(struct lpfc_hba * phba)
1376 * Cancel rescue timer for Discovery / RSCN handling 1724 * Cancel rescue timer for Discovery / RSCN handling
1377 */ 1725 */
1378int 1726int
1379lpfc_can_disctmo(struct lpfc_hba * phba) 1727lpfc_can_disctmo(struct lpfc_vport *vport)
1380{ 1728{
1729 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1730 struct lpfc_hba *phba = vport->phba;
1731 unsigned long iflags;
1732
1733 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1734 "can disc timer: state:x%x rtry:x%x flg:x%x",
1735 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1736
 1381 /* Turn off discovery timer if it's running */ 1737 /* Turn off discovery timer if it's running */
1382 if (phba->fc_flag & FC_DISC_TMO) { 1738 if (vport->fc_flag & FC_DISC_TMO) {
1383 spin_lock_irq(phba->host->host_lock); 1739 spin_lock_irqsave(shost->host_lock, iflags);
1384 phba->fc_flag &= ~FC_DISC_TMO; 1740 vport->fc_flag &= ~FC_DISC_TMO;
1385 spin_unlock_irq(phba->host->host_lock); 1741 spin_unlock_irqrestore(shost->host_lock, iflags);
1386 del_timer_sync(&phba->fc_disctmo); 1742 del_timer_sync(&vport->fc_disctmo);
1387 phba->work_hba_events &= ~WORKER_DISC_TMO; 1743 spin_lock_irqsave(&vport->work_port_lock, iflags);
1744 vport->work_port_events &= ~WORKER_DISC_TMO;
1745 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
1388 } 1746 }
1389 1747
1390 /* Cancel Discovery Timer state <hba_state> */ 1748 /* Cancel Discovery Timer state <hba_state> */
1391 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1749 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1392 "%d:0248 Cancel Discovery Timer state x%x " 1750 "%d (%d):0248 Cancel Discovery Timer state x%x "
1393 "Data: x%x x%x x%x\n", 1751 "Data: x%x x%x x%x\n",
1394 phba->brd_no, phba->hba_state, phba->fc_flag, 1752 phba->brd_no, vport->vpi, vport->port_state,
1395 phba->fc_plogi_cnt, phba->fc_adisc_cnt); 1753 vport->fc_flag, vport->fc_plogi_cnt,
1754 vport->fc_adisc_cnt);
1396 1755
1397 return 0; 1756 return 0;
1398} 1757}
@@ -1402,15 +1761,18 @@ lpfc_can_disctmo(struct lpfc_hba * phba)
1402 * Return true if iocb matches the specified nport 1761 * Return true if iocb matches the specified nport
1403 */ 1762 */
1404int 1763int
1405lpfc_check_sli_ndlp(struct lpfc_hba * phba, 1764lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1406 struct lpfc_sli_ring * pring, 1765 struct lpfc_sli_ring *pring,
1407 struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp) 1766 struct lpfc_iocbq *iocb,
1767 struct lpfc_nodelist *ndlp)
1408{ 1768{
1409 struct lpfc_sli *psli; 1769 struct lpfc_sli *psli = &phba->sli;
1410 IOCB_t *icmd; 1770 IOCB_t *icmd = &iocb->iocb;
1771 struct lpfc_vport *vport = ndlp->vport;
1772
1773 if (iocb->vport != vport)
1774 return 0;
1411 1775
1412 psli = &phba->sli;
1413 icmd = &iocb->iocb;
1414 if (pring->ringno == LPFC_ELS_RING) { 1776 if (pring->ringno == LPFC_ELS_RING) {
1415 switch (icmd->ulpCommand) { 1777 switch (icmd->ulpCommand) {
1416 case CMD_GEN_REQUEST64_CR: 1778 case CMD_GEN_REQUEST64_CR:
@@ -1428,7 +1790,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1428 } else if (pring->ringno == psli->fcp_ring) { 1790 } else if (pring->ringno == psli->fcp_ring) {
1429 /* Skip match check if waiting to relogin to FCP target */ 1791 /* Skip match check if waiting to relogin to FCP target */
1430 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 1792 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1431 (ndlp->nlp_flag & NLP_DELAY_TMO)) { 1793 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1432 return 0; 1794 return 0;
1433 } 1795 }
1434 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) { 1796 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
@@ -1445,7 +1807,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1445 * associated with nlp_rpi in the LPFC_NODELIST entry. 1807 * associated with nlp_rpi in the LPFC_NODELIST entry.
1446 */ 1808 */
1447static int 1809static int
1448lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1810lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1449{ 1811{
1450 LIST_HEAD(completions); 1812 LIST_HEAD(completions);
1451 struct lpfc_sli *psli; 1813 struct lpfc_sli *psli;
@@ -1454,6 +1816,8 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1454 IOCB_t *icmd; 1816 IOCB_t *icmd;
1455 uint32_t rpi, i; 1817 uint32_t rpi, i;
1456 1818
1819 lpfc_fabric_abort_nport(ndlp);
1820
1457 /* 1821 /*
1458 * Everything that matches on txcmplq will be returned 1822 * Everything that matches on txcmplq will be returned
1459 * by firmware with a no rpi error. 1823 * by firmware with a no rpi error.
@@ -1465,15 +1829,15 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1465 for (i = 0; i < psli->num_rings; i++) { 1829 for (i = 0; i < psli->num_rings; i++) {
1466 pring = &psli->ring[i]; 1830 pring = &psli->ring[i];
1467 1831
1468 spin_lock_irq(phba->host->host_lock); 1832 spin_lock_irq(&phba->hbalock);
1469 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, 1833 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1470 list) { 1834 list) {
1471 /* 1835 /*
1472 * Check to see if iocb matches the nport we are 1836 * Check to see if iocb matches the nport we are
1473 * looking for 1837 * looking for
1474 */ 1838 */
1475 if ((lpfc_check_sli_ndlp 1839 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1476 (phba, pring, iocb, ndlp))) { 1840 ndlp))) {
 1477 /* It matches, so dequeue and call compl 1841 /* It matches, so dequeue and call compl
1478 with an error */ 1842 with an error */
1479 list_move_tail(&iocb->list, 1843 list_move_tail(&iocb->list,
@@ -1481,22 +1845,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1481 pring->txq_cnt--; 1845 pring->txq_cnt--;
1482 } 1846 }
1483 } 1847 }
1484 spin_unlock_irq(phba->host->host_lock); 1848 spin_unlock_irq(&phba->hbalock);
1485
1486 } 1849 }
1487 } 1850 }
1488 1851
1489 while (!list_empty(&completions)) { 1852 while (!list_empty(&completions)) {
1490 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 1853 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1491 list_del(&iocb->list); 1854 list_del_init(&iocb->list);
1492 1855
1493 if (iocb->iocb_cmpl) { 1856 if (!iocb->iocb_cmpl)
1857 lpfc_sli_release_iocbq(phba, iocb);
1858 else {
1494 icmd = &iocb->iocb; 1859 icmd = &iocb->iocb;
1495 icmd->ulpStatus = IOSTAT_LOCAL_REJECT; 1860 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1496 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 1861 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1497 (iocb->iocb_cmpl) (phba, iocb, iocb); 1862 (iocb->iocb_cmpl)(phba, iocb, iocb);
1498 } else 1863 }
1499 lpfc_sli_release_iocbq(phba, iocb);
1500 } 1864 }
1501 1865
1502 return 0; 1866 return 0;
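
lpfc_no_rpi() uses a two-phase drain that recurs in this series: iocbs matching the dying node are moved to a private completions list while the ring lock is held, and only after the lock is dropped are they failed back with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED, or simply released when they have no completion handler. A reduced, lock-free sketch of that drain, with a toy linked list in place of the driver's txq:

#include <stdio.h>
#include <stdlib.h>

/* Toy model: phase 1 moves matching entries to a private list (done under
 * the ring lock in the driver), phase 2 completes them with an abort status
 * after the lock is dropped. */
struct iocb {
	int rpi;
	void (*cmpl)(struct iocb *);
	struct iocb *next;
};

static void els_cmpl(struct iocb *io)
{
	printf("iocb rpi %d failed with LOCAL_REJECT/SLI_ABORTED\n", io->rpi);
}

int main(void)
{
	struct iocb a = { 5, els_cmpl, NULL };
	struct iocb b = { 7, NULL,     &a  };
	struct iocb *txq = &b, *completions = NULL, *io, *next;
	int dying_rpi = 5;

	/* Phase 1: under the lock, move matching iocbs off the tx queue. */
	for (io = txq, txq = NULL; io; io = next) {
		next = io->next;
		if (io->rpi == dying_rpi) {
			io->next = completions;
			completions = io;
		} else {
			io->next = txq;
			txq = io;
		}
	}

	/* Phase 2: lock dropped, now run the completions (or just release). */
	for (io = completions; io; io = io->next) {
		if (io->cmpl)
			io->cmpl(io);
		else
			printf("iocb rpi %d released, no completion\n", io->rpi);
	}
	return 0;
}
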
@@ -1512,19 +1876,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1512 * we are waiting to PLOGI back to the remote NPort. 1876 * we are waiting to PLOGI back to the remote NPort.
1513 */ 1877 */
1514int 1878int
1515lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1879lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1516{ 1880{
1517 LPFC_MBOXQ_t *mbox; 1881 struct lpfc_hba *phba = vport->phba;
1882 LPFC_MBOXQ_t *mbox;
1518 int rc; 1883 int rc;
1519 1884
1520 if (ndlp->nlp_rpi) { 1885 if (ndlp->nlp_rpi) {
1521 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 1886 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1522 lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox); 1887 if (mbox) {
1523 mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 1888 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1524 rc = lpfc_sli_issue_mbox 1889 mbox->vport = vport;
1525 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); 1890 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1891 rc = lpfc_sli_issue_mbox(phba, mbox,
1892 (MBX_NOWAIT | MBX_STOP_IOCB));
1526 if (rc == MBX_NOT_FINISHED) 1893 if (rc == MBX_NOT_FINISHED)
1527 mempool_free( mbox, phba->mbox_mem_pool); 1894 mempool_free(mbox, phba->mbox_mem_pool);
1528 } 1895 }
1529 lpfc_no_rpi(phba, ndlp); 1896 lpfc_no_rpi(phba, ndlp);
1530 ndlp->nlp_rpi = 0; 1897 ndlp->nlp_rpi = 0;
@@ -1533,25 +1900,70 @@ lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1533 return 0; 1900 return 0;
1534} 1901}
1535 1902
1903void
1904lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1905{
1906 struct lpfc_hba *phba = vport->phba;
1907 LPFC_MBOXQ_t *mbox;
1908 int rc;
1909
1910 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1911 if (mbox) {
1912 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1913 mbox->vport = vport;
1914 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1915 rc = lpfc_sli_issue_mbox(phba, mbox,
1916 (MBX_NOWAIT | MBX_STOP_IOCB));
1917 if (rc == MBX_NOT_FINISHED) {
1918 mempool_free(mbox, phba->mbox_mem_pool);
1919 }
1920 }
1921}
1922
1923void
1924lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1925{
1926 struct lpfc_hba *phba = vport->phba;
1927 LPFC_MBOXQ_t *mbox;
1928 int rc;
1929
1930 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1931 if (mbox) {
1932 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1933 mbox->vport = vport;
1934 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1935 rc = lpfc_sli_issue_mbox(phba, mbox,
1936 (MBX_NOWAIT | MBX_STOP_IOCB));
1937 if (rc == MBX_NOT_FINISHED) {
1938 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1939 "%d (%d):1815 Could not issue "
1940 "unreg_did (default rpis)\n",
1941 phba->brd_no, vport->vpi);
1942 mempool_free(mbox, phba->mbox_mem_pool);
1943 }
1944 }
1945}
1946
1536/* 1947/*
1537 * Free resources associated with LPFC_NODELIST entry 1948 * Free resources associated with LPFC_NODELIST entry
1538 * so it can be freed. 1949 * so it can be freed.
1539 */ 1950 */
1540static int 1951static int
1541lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1952lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1542{ 1953{
1543 LPFC_MBOXQ_t *mb; 1954 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1544 LPFC_MBOXQ_t *nextmb; 1955 struct lpfc_hba *phba = vport->phba;
1956 LPFC_MBOXQ_t *mb, *nextmb;
1545 struct lpfc_dmabuf *mp; 1957 struct lpfc_dmabuf *mp;
1546 1958
1547 /* Cleanup node for NPort <nlp_DID> */ 1959 /* Cleanup node for NPort <nlp_DID> */
1548 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1960 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1549 "%d:0900 Cleanup node for NPort x%x " 1961 "%d (%d):0900 Cleanup node for NPort x%x "
1550 "Data: x%x x%x x%x\n", 1962 "Data: x%x x%x x%x\n",
1551 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 1963 phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
1552 ndlp->nlp_state, ndlp->nlp_rpi); 1964 ndlp->nlp_state, ndlp->nlp_rpi);
1553 1965
1554 lpfc_dequeue_node(phba, ndlp); 1966 lpfc_dequeue_node(vport, ndlp);
1555 1967
1556 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1968 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1557 if ((mb = phba->sli.mbox_active)) { 1969 if ((mb = phba->sli.mbox_active)) {
@@ -1562,13 +1974,13 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1562 } 1974 }
1563 } 1975 }
1564 1976
1565 spin_lock_irq(phba->host->host_lock); 1977 spin_lock_irq(&phba->hbalock);
1566 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1978 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1567 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1979 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1568 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1980 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1569 mp = (struct lpfc_dmabuf *) (mb->context1); 1981 mp = (struct lpfc_dmabuf *) (mb->context1);
1570 if (mp) { 1982 if (mp) {
1571 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1983 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1572 kfree(mp); 1984 kfree(mp);
1573 } 1985 }
1574 list_del(&mb->list); 1986 list_del(&mb->list);
@@ -1576,20 +1988,27 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1576 lpfc_nlp_put(ndlp); 1988 lpfc_nlp_put(ndlp);
1577 } 1989 }
1578 } 1990 }
1579 spin_unlock_irq(phba->host->host_lock); 1991 spin_unlock_irq(&phba->hbalock);
1580 1992
1581 lpfc_els_abort(phba,ndlp); 1993 lpfc_els_abort(phba,ndlp);
1582 spin_lock_irq(phba->host->host_lock); 1994 spin_lock_irq(shost->host_lock);
1583 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 1995 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1584 spin_unlock_irq(phba->host->host_lock); 1996 spin_unlock_irq(shost->host_lock);
1585 1997
1586 ndlp->nlp_last_elscmd = 0; 1998 ndlp->nlp_last_elscmd = 0;
1587 del_timer_sync(&ndlp->nlp_delayfunc); 1999 del_timer_sync(&ndlp->nlp_delayfunc);
1588 2000
1589 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) 2001 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1590 list_del_init(&ndlp->els_retry_evt.evt_listp); 2002 list_del_init(&ndlp->els_retry_evt.evt_listp);
2003 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
2004 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2005
2006 if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) {
2007 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2008 complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2));
2009 }
1591 2010
1592 lpfc_unreg_rpi(phba, ndlp); 2011 lpfc_unreg_rpi(vport, ndlp);
1593 2012
1594 return 0; 2013 return 0;
1595} 2014}
@@ -1600,18 +2019,22 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1600 * machine, defer the free till we reach the end of the state machine. 2019 * machine, defer the free till we reach the end of the state machine.
1601 */ 2020 */
1602static void 2021static void
1603lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 2022lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1604{ 2023{
1605 struct lpfc_rport_data *rdata; 2024 struct lpfc_rport_data *rdata;
1606 2025
1607 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 2026 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1608 lpfc_cancel_retry_delay_tmo(phba, ndlp); 2027 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1609 } 2028 }
1610 2029
1611 lpfc_cleanup_node(phba, ndlp); 2030 lpfc_cleanup_node(vport, ndlp);
1612 2031
1613 if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) { 2032 /*
1614 put_device(&ndlp->rport->dev); 2033 * We can get here with a non-NULL ndlp->rport because when we
2034 * unregister a rport we don't break the rport/node linkage. So if we
 2034 * do, make sure we don't leave any dangling pointers behind.
2036 */
2037 if (ndlp->rport) {
1615 rdata = ndlp->rport->dd_data; 2038 rdata = ndlp->rport->dd_data;
1616 rdata->pnode = NULL; 2039 rdata->pnode = NULL;
1617 ndlp->rport = NULL; 2040 ndlp->rport = NULL;
@@ -1619,11 +2042,10 @@ lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1619} 2042}
1620 2043
1621static int 2044static int
1622lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) 2045lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2046 uint32_t did)
1623{ 2047{
1624 D_ID mydid; 2048 D_ID mydid, ndlpdid, matchdid;
1625 D_ID ndlpdid;
1626 D_ID matchdid;
1627 2049
1628 if (did == Bcast_DID) 2050 if (did == Bcast_DID)
1629 return 0; 2051 return 0;
@@ -1637,7 +2059,7 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
1637 return 1; 2059 return 1;
1638 2060
1639 /* Next check for area/domain identically equals 0 match */ 2061 /* Next check for area/domain identically equals 0 match */
1640 mydid.un.word = phba->fc_myDID; 2062 mydid.un.word = vport->fc_myDID;
1641 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { 2063 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
1642 return 0; 2064 return 0;
1643 } 2065 }
@@ -1669,101 +2091,116 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
1669} 2091}
1670 2092
1671/* Search for a nodelist entry */ 2093/* Search for a nodelist entry */
1672struct lpfc_nodelist * 2094static struct lpfc_nodelist *
1673lpfc_findnode_did(struct lpfc_hba *phba, uint32_t did) 2095__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
1674{ 2096{
2097 struct lpfc_hba *phba = vport->phba;
1675 struct lpfc_nodelist *ndlp; 2098 struct lpfc_nodelist *ndlp;
1676 uint32_t data1; 2099 uint32_t data1;
1677 2100
1678 spin_lock_irq(phba->host->host_lock); 2101 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1679 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 2102 if (lpfc_matchdid(vport, ndlp, did)) {
1680 if (lpfc_matchdid(phba, ndlp, did)) {
1681 data1 = (((uint32_t) ndlp->nlp_state << 24) | 2103 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1682 ((uint32_t) ndlp->nlp_xri << 16) | 2104 ((uint32_t) ndlp->nlp_xri << 16) |
1683 ((uint32_t) ndlp->nlp_type << 8) | 2105 ((uint32_t) ndlp->nlp_type << 8) |
1684 ((uint32_t) ndlp->nlp_rpi & 0xff)); 2106 ((uint32_t) ndlp->nlp_rpi & 0xff));
1685 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 2107 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1686 "%d:0929 FIND node DID " 2108 "%d (%d):0929 FIND node DID "
1687 " Data: x%p x%x x%x x%x\n", 2109 " Data: x%p x%x x%x x%x\n",
1688 phba->brd_no, 2110 phba->brd_no, vport->vpi,
1689 ndlp, ndlp->nlp_DID, 2111 ndlp, ndlp->nlp_DID,
1690 ndlp->nlp_flag, data1); 2112 ndlp->nlp_flag, data1);
1691 spin_unlock_irq(phba->host->host_lock);
1692 return ndlp; 2113 return ndlp;
1693 } 2114 }
1694 } 2115 }
1695 spin_unlock_irq(phba->host->host_lock);
1696 2116
1697 /* FIND node did <did> NOT FOUND */ 2117 /* FIND node did <did> NOT FOUND */
1698 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 2118 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1699 "%d:0932 FIND node did x%x NOT FOUND.\n", 2119 "%d (%d):0932 FIND node did x%x NOT FOUND.\n",
1700 phba->brd_no, did); 2120 phba->brd_no, vport->vpi, did);
1701 return NULL; 2121 return NULL;
1702} 2122}
1703 2123
1704struct lpfc_nodelist * 2124struct lpfc_nodelist *
1705lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did) 2125lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
1706{ 2126{
2127 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1707 struct lpfc_nodelist *ndlp; 2128 struct lpfc_nodelist *ndlp;
1708 2129
1709 ndlp = lpfc_findnode_did(phba, did); 2130 spin_lock_irq(shost->host_lock);
2131 ndlp = __lpfc_findnode_did(vport, did);
2132 spin_unlock_irq(shost->host_lock);
2133 return ndlp;
2134}
2135
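The split just above between __lpfc_findnode_did(), which walks vport->fc_nodes with no locking, and lpfc_findnode_did(), which takes shost->host_lock around it, is a common kernel idiom: the double-underscore routine is for callers that already hold the lock. A bare-bones sketch of the idiom follows; demo_node, demo_find and the explicit lock argument are made-up illustrations, not lpfc types.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_node {
	struct list_head listp;
	u32 did;
};

/* Caller must already hold the list lock. */
static struct demo_node *__demo_find(struct list_head *head, u32 did)
{
	struct demo_node *np;

	list_for_each_entry(np, head, listp)
		if (np->did == did)
			return np;
	return NULL;
}

/* Locking wrapper for callers that do not hold the lock. */
static struct demo_node *demo_find(struct list_head *head, spinlock_t *lock,
				   u32 did)
{
	struct demo_node *np;

	spin_lock_irq(lock);
	np = __demo_find(head, did);
	spin_unlock_irq(lock);
	return np;
}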
2136struct lpfc_nodelist *
2137lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2138{
2139 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2140 struct lpfc_nodelist *ndlp;
2141
2142 ndlp = lpfc_findnode_did(vport, did);
1710 if (!ndlp) { 2143 if (!ndlp) {
1711 if ((phba->fc_flag & FC_RSCN_MODE) && 2144 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
1712 ((lpfc_rscn_payload_check(phba, did) == 0))) 2145 lpfc_rscn_payload_check(vport, did) == 0)
1713 return NULL; 2146 return NULL;
1714 ndlp = (struct lpfc_nodelist *) 2147 ndlp = (struct lpfc_nodelist *)
1715 mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 2148 mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
1716 if (!ndlp) 2149 if (!ndlp)
1717 return NULL; 2150 return NULL;
1718 lpfc_nlp_init(phba, ndlp, did); 2151 lpfc_nlp_init(vport, ndlp, did);
1719 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 2152 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2153 spin_lock_irq(shost->host_lock);
1720 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2154 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2155 spin_unlock_irq(shost->host_lock);
1721 return ndlp; 2156 return ndlp;
1722 } 2157 }
1723 if (phba->fc_flag & FC_RSCN_MODE) { 2158 if (vport->fc_flag & FC_RSCN_MODE) {
1724 if (lpfc_rscn_payload_check(phba, did)) { 2159 if (lpfc_rscn_payload_check(vport, did)) {
2160 spin_lock_irq(shost->host_lock);
1725 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2161 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2162 spin_unlock_irq(shost->host_lock);
1726 2163
1727 /* Since this node is marked for discovery, 2164 /* Since this node is marked for discovery,
1728 * delay timeout is not needed. 2165 * delay timeout is not needed.
1729 */ 2166 */
1730 if (ndlp->nlp_flag & NLP_DELAY_TMO) 2167 if (ndlp->nlp_flag & NLP_DELAY_TMO)
1731 lpfc_cancel_retry_delay_tmo(phba, ndlp); 2168 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1732 } else 2169 } else
1733 ndlp = NULL; 2170 ndlp = NULL;
1734 } else { 2171 } else {
1735 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || 2172 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
1736 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) 2173 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
1737 return NULL; 2174 return NULL;
1738 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 2175 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2176 spin_lock_irq(shost->host_lock);
1739 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2177 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2178 spin_unlock_irq(shost->host_lock);
1740 } 2179 }
1741 return ndlp; 2180 return ndlp;
1742} 2181}
1743 2182
1744/* Build a list of nodes to discover based on the loopmap */ 2183/* Build a list of nodes to discover based on the loopmap */
1745void 2184void
1746lpfc_disc_list_loopmap(struct lpfc_hba * phba) 2185lpfc_disc_list_loopmap(struct lpfc_vport *vport)
1747{ 2186{
2187 struct lpfc_hba *phba = vport->phba;
1748 int j; 2188 int j;
1749 uint32_t alpa, index; 2189 uint32_t alpa, index;
1750 2190
1751 if (phba->hba_state <= LPFC_LINK_DOWN) { 2191 if (!lpfc_is_link_up(phba))
1752 return; 2192 return;
1753 } 2193
1754 if (phba->fc_topology != TOPOLOGY_LOOP) { 2194 if (phba->fc_topology != TOPOLOGY_LOOP)
1755 return; 2195 return;
1756 }
1757 2196
1758 /* Check for loop map present or not */ 2197 /* Check for loop map present or not */
1759 if (phba->alpa_map[0]) { 2198 if (phba->alpa_map[0]) {
1760 for (j = 1; j <= phba->alpa_map[0]; j++) { 2199 for (j = 1; j <= phba->alpa_map[0]; j++) {
1761 alpa = phba->alpa_map[j]; 2200 alpa = phba->alpa_map[j];
1762 2201 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
1763 if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
1764 continue; 2202 continue;
1765 } 2203 lpfc_setup_disc_node(vport, alpa);
1766 lpfc_setup_disc_node(phba, alpa);
1767 } 2204 }
1768 } else { 2205 } else {
1769 /* No alpamap, so try all alpa's */ 2206 /* No alpamap, so try all alpa's */
@@ -1776,113 +2213,167 @@ lpfc_disc_list_loopmap(struct lpfc_hba * phba)
1776 else 2213 else
1777 index = FC_MAXLOOP - j - 1; 2214 index = FC_MAXLOOP - j - 1;
1778 alpa = lpfcAlpaArray[index]; 2215 alpa = lpfcAlpaArray[index];
1779 if ((phba->fc_myDID & 0xff) == alpa) { 2216 if ((vport->fc_myDID & 0xff) == alpa)
1780 continue; 2217 continue;
1781 } 2218 lpfc_setup_disc_node(vport, alpa);
1782
1783 lpfc_setup_disc_node(phba, alpa);
1784 } 2219 }
1785 } 2220 }
1786 return; 2221 return;
1787} 2222}
1788 2223
1789/* Start Link up / RSCN discovery on NPR list */
1790void 2224void
1791lpfc_disc_start(struct lpfc_hba * phba) 2225lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
1792{ 2226{
1793 struct lpfc_sli *psli;
1794 LPFC_MBOXQ_t *mbox; 2227 LPFC_MBOXQ_t *mbox;
1795 struct lpfc_nodelist *ndlp, *next_ndlp; 2228 struct lpfc_sli *psli = &phba->sli;
2229 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2230 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
2231 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
2232 int rc;
2233
2234 /*
 2235	 * if it's not a physical port, or if we have already sent
 2236	 * clear_la, then don't send it again.
2237 */
2238 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2239 (vport->port_type != LPFC_PHYSICAL_PORT))
2240 return;
2241
2242 /* Link up discovery */
2243 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2244 phba->link_state = LPFC_CLEAR_LA;
2245 lpfc_clear_la(phba, mbox);
2246 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2247 mbox->vport = vport;
2248 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
2249 MBX_STOP_IOCB));
2250 if (rc == MBX_NOT_FINISHED) {
2251 mempool_free(mbox, phba->mbox_mem_pool);
2252 lpfc_disc_flush_list(vport);
2253 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2254 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2255 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2256 phba->link_state = LPFC_HBA_ERROR;
2257 }
2258 }
2259}
2260
2261/* Reg_vpi to tell firmware to resume normal operations */
2262void
2263lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2264{
2265 LPFC_MBOXQ_t *regvpimbox;
2266
2267 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2268 if (regvpimbox) {
2269 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2270 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2271 regvpimbox->vport = vport;
2272 if (lpfc_sli_issue_mbox(phba, regvpimbox,
2273 (MBX_NOWAIT | MBX_STOP_IOCB))
2274 == MBX_NOT_FINISHED) {
2275 mempool_free(regvpimbox, phba->mbox_mem_pool);
2276 }
2277 }
2278}
2279
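Both lpfc_issue_clear_la() and lpfc_issue_reg_vpi() above follow the same ownership rule for mailbox commands: allocate from the mailbox mempool, attach a completion handler and the owning vport, issue non-blocking, and give the buffer back to the pool only if the SLI layer refuses it (MBX_NOT_FINISHED); on success the completion handler owns the free. A stripped-down sketch of that rule, with demo_issue() standing in for lpfc_sli_issue_mbox() and all demo_* names being illustrative assumptions:

#include <linux/mempool.h>
#include <linux/gfp.h>

#define DEMO_NOT_FINISHED	1	/* stand-in for MBX_NOT_FINISHED */

struct demo_mbox {
	void (*cmpl)(struct demo_mbox *mbox);	/* runs later, then frees */
	void *owner;
};

/* Assumed non-blocking issue routine (illustrative, not a real API). */
extern int demo_issue(struct demo_mbox *mbox);

static void demo_send_cmd(mempool_t *pool, void *owner,
			  void (*cmpl)(struct demo_mbox *))
{
	struct demo_mbox *mbox = mempool_alloc(pool, GFP_KERNEL);

	if (!mbox)
		return;
	mbox->cmpl = cmpl;
	mbox->owner = owner;
	if (demo_issue(mbox) == DEMO_NOT_FINISHED)
		mempool_free(mbox, pool);	/* still ours on failure */
	/* on success, ownership passes to the completion handler */
}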
2280/* Start Link up / RSCN discovery on NPR nodes */
2281void
2282lpfc_disc_start(struct lpfc_vport *vport)
2283{
2284 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2285 struct lpfc_hba *phba = vport->phba;
1796 uint32_t num_sent; 2286 uint32_t num_sent;
1797 uint32_t clear_la_pending; 2287 uint32_t clear_la_pending;
1798 int did_changed; 2288 int did_changed;
1799 int rc;
1800 2289
1801 psli = &phba->sli; 2290 if (!lpfc_is_link_up(phba))
1802
1803 if (phba->hba_state <= LPFC_LINK_DOWN) {
1804 return; 2291 return;
1805 } 2292
1806 if (phba->hba_state == LPFC_CLEAR_LA) 2293 if (phba->link_state == LPFC_CLEAR_LA)
1807 clear_la_pending = 1; 2294 clear_la_pending = 1;
1808 else 2295 else
1809 clear_la_pending = 0; 2296 clear_la_pending = 0;
1810 2297
1811 if (phba->hba_state < LPFC_HBA_READY) { 2298 if (vport->port_state < LPFC_VPORT_READY)
1812 phba->hba_state = LPFC_DISC_AUTH; 2299 vport->port_state = LPFC_DISC_AUTH;
1813 }
1814 lpfc_set_disctmo(phba);
1815 2300
1816 if (phba->fc_prevDID == phba->fc_myDID) { 2301 lpfc_set_disctmo(vport);
2302
2303 if (vport->fc_prevDID == vport->fc_myDID)
1817 did_changed = 0; 2304 did_changed = 0;
1818 } else { 2305 else
1819 did_changed = 1; 2306 did_changed = 1;
1820 } 2307
1821 phba->fc_prevDID = phba->fc_myDID; 2308 vport->fc_prevDID = vport->fc_myDID;
1822 phba->num_disc_nodes = 0; 2309 vport->num_disc_nodes = 0;
1823 2310
1824 /* Start Discovery state <hba_state> */ 2311 /* Start Discovery state <hba_state> */
1825 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2312 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1826 "%d:0202 Start Discovery hba state x%x " 2313 "%d (%d):0202 Start Discovery hba state x%x "
1827 "Data: x%x x%x x%x\n", 2314 "Data: x%x x%x x%x\n",
1828 phba->brd_no, phba->hba_state, phba->fc_flag, 2315 phba->brd_no, vport->vpi, vport->port_state,
1829 phba->fc_plogi_cnt, phba->fc_adisc_cnt); 2316 vport->fc_flag, vport->fc_plogi_cnt,
1830 2317 vport->fc_adisc_cnt);
1831 /* If our did changed, we MUST do PLOGI */
1832 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
1833 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
1834 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
1835 did_changed) {
1836 spin_lock_irq(phba->host->host_lock);
1837 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1838 spin_unlock_irq(phba->host->host_lock);
1839 }
1840 }
1841 2318
1842 /* First do ADISCs - if any */ 2319 /* First do ADISCs - if any */
1843 num_sent = lpfc_els_disc_adisc(phba); 2320 num_sent = lpfc_els_disc_adisc(vport);
1844 2321
1845 if (num_sent) 2322 if (num_sent)
1846 return; 2323 return;
1847 2324
1848 if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) { 2325 /*
2326 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
2327 * continue discovery.
2328 */
2329 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2330 !(vport->fc_flag & FC_RSCN_MODE)) {
2331 lpfc_issue_reg_vpi(phba, vport);
2332 return;
2333 }
2334
2335 /*
2336 * For SLI2, we need to set port_state to READY and continue
2337 * discovery.
2338 */
2339 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
1849 /* If we get here, there is nothing to ADISC */ 2340 /* If we get here, there is nothing to ADISC */
1850 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 2341 if (vport->port_type == LPFC_PHYSICAL_PORT)
1851 phba->hba_state = LPFC_CLEAR_LA; 2342 lpfc_issue_clear_la(phba, vport);
1852 lpfc_clear_la(phba, mbox); 2343
1853 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 2344 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1854 rc = lpfc_sli_issue_mbox(phba, mbox, 2345 vport->num_disc_nodes = 0;
1855 (MBX_NOWAIT | MBX_STOP_IOCB)); 2346 /* go thru NPR nodes and issue ELS PLOGIs */
1856 if (rc == MBX_NOT_FINISHED) { 2347 if (vport->fc_npr_cnt)
1857 mempool_free( mbox, phba->mbox_mem_pool); 2348 lpfc_els_disc_plogi(vport);
1858 lpfc_disc_flush_list(phba); 2349
1859 psli->ring[(psli->extra_ring)].flag &= 2350 if (!vport->num_disc_nodes) {
1860 ~LPFC_STOP_IOCB_EVENT; 2351 spin_lock_irq(shost->host_lock);
1861 psli->ring[(psli->fcp_ring)].flag &= 2352 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1862 ~LPFC_STOP_IOCB_EVENT; 2353 spin_unlock_irq(shost->host_lock);
1863 psli->ring[(psli->next_ring)].flag &= 2354 lpfc_can_disctmo(vport);
1864 ~LPFC_STOP_IOCB_EVENT;
1865 phba->hba_state = LPFC_HBA_READY;
1866 } 2355 }
1867 } 2356 }
2357 vport->port_state = LPFC_VPORT_READY;
1868 } else { 2358 } else {
1869 /* Next do PLOGIs - if any */ 2359 /* Next do PLOGIs - if any */
1870 num_sent = lpfc_els_disc_plogi(phba); 2360 num_sent = lpfc_els_disc_plogi(vport);
1871 2361
1872 if (num_sent) 2362 if (num_sent)
1873 return; 2363 return;
1874 2364
1875 if (phba->fc_flag & FC_RSCN_MODE) { 2365 if (vport->fc_flag & FC_RSCN_MODE) {
1876 /* Check to see if more RSCNs came in while we 2366 /* Check to see if more RSCNs came in while we
1877 * were processing this one. 2367 * were processing this one.
1878 */ 2368 */
1879 if ((phba->fc_rscn_id_cnt == 0) && 2369 if ((vport->fc_rscn_id_cnt == 0) &&
1880 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { 2370 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
1881 spin_lock_irq(phba->host->host_lock); 2371 spin_lock_irq(shost->host_lock);
1882 phba->fc_flag &= ~FC_RSCN_MODE; 2372 vport->fc_flag &= ~FC_RSCN_MODE;
1883 spin_unlock_irq(phba->host->host_lock); 2373 spin_unlock_irq(shost->host_lock);
2374 lpfc_can_disctmo(vport);
1884 } else 2375 } else
1885 lpfc_els_handle_rscn(phba); 2376 lpfc_els_handle_rscn(vport);
1886 } 2377 }
1887 } 2378 }
1888 return; 2379 return;
@@ -1893,7 +2384,7 @@ lpfc_disc_start(struct lpfc_hba * phba)
1893 * ring that match the specified nodelist. 2384 * ring that match the specified nodelist.
1894 */ 2385 */
1895static void 2386static void
1896lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 2387lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1897{ 2388{
1898 LIST_HEAD(completions); 2389 LIST_HEAD(completions);
1899 struct lpfc_sli *psli; 2390 struct lpfc_sli *psli;
@@ -1907,7 +2398,7 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1907 /* Error matching iocb on txq or txcmplq 2398 /* Error matching iocb on txq or txcmplq
1908 * First check the txq. 2399 * First check the txq.
1909 */ 2400 */
1910 spin_lock_irq(phba->host->host_lock); 2401 spin_lock_irq(&phba->hbalock);
1911 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 2402 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
1912 if (iocb->context1 != ndlp) { 2403 if (iocb->context1 != ndlp) {
1913 continue; 2404 continue;
@@ -1927,36 +2418,36 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1927 continue; 2418 continue;
1928 } 2419 }
1929 icmd = &iocb->iocb; 2420 icmd = &iocb->iocb;
1930 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) || 2421 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
1931 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { 2422 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
1932 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 2423 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1933 } 2424 }
1934 } 2425 }
1935 spin_unlock_irq(phba->host->host_lock); 2426 spin_unlock_irq(&phba->hbalock);
1936 2427
1937 while (!list_empty(&completions)) { 2428 while (!list_empty(&completions)) {
1938 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 2429 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1939 list_del(&iocb->list); 2430 list_del_init(&iocb->list);
1940 2431
1941 if (iocb->iocb_cmpl) { 2432 if (!iocb->iocb_cmpl)
2433 lpfc_sli_release_iocbq(phba, iocb);
2434 else {
1942 icmd = &iocb->iocb; 2435 icmd = &iocb->iocb;
1943 icmd->ulpStatus = IOSTAT_LOCAL_REJECT; 2436 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1944 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 2437 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1945 (iocb->iocb_cmpl) (phba, iocb, iocb); 2438 (iocb->iocb_cmpl) (phba, iocb, iocb);
1946 } else 2439 }
1947 lpfc_sli_release_iocbq(phba, iocb);
1948 } 2440 }
1949
1950 return;
1951} 2441}
1952 2442
1953void 2443void
1954lpfc_disc_flush_list(struct lpfc_hba * phba) 2444lpfc_disc_flush_list(struct lpfc_vport *vport)
1955{ 2445{
1956 struct lpfc_nodelist *ndlp, *next_ndlp; 2446 struct lpfc_nodelist *ndlp, *next_ndlp;
2447 struct lpfc_hba *phba = vport->phba;
1957 2448
1958 if (phba->fc_plogi_cnt || phba->fc_adisc_cnt) { 2449 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
1959 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, 2450 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
1960 nlp_listp) { 2451 nlp_listp) {
1961 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 2452 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
1962 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { 2453 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
@@ -1967,6 +2458,14 @@ lpfc_disc_flush_list(struct lpfc_hba * phba)
1967 } 2458 }
1968} 2459}
1969 2460
2461void
2462lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
2463{
2464 lpfc_els_flush_rscn(vport);
2465 lpfc_els_flush_cmd(vport);
2466 lpfc_disc_flush_list(vport);
2467}
2468
1970/*****************************************************************************/ 2469/*****************************************************************************/
1971/* 2470/*
1972 * NAME: lpfc_disc_timeout 2471 * NAME: lpfc_disc_timeout
@@ -1985,158 +2484,154 @@ lpfc_disc_flush_list(struct lpfc_hba * phba)
1985void 2484void
1986lpfc_disc_timeout(unsigned long ptr) 2485lpfc_disc_timeout(unsigned long ptr)
1987{ 2486{
1988 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 2487 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2488 struct lpfc_hba *phba = vport->phba;
1989 unsigned long flags = 0; 2489 unsigned long flags = 0;
1990 2490
1991 if (unlikely(!phba)) 2491 if (unlikely(!phba))
1992 return; 2492 return;
1993 2493
1994 spin_lock_irqsave(phba->host->host_lock, flags); 2494 if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
1995 if (!(phba->work_hba_events & WORKER_DISC_TMO)) { 2495 spin_lock_irqsave(&vport->work_port_lock, flags);
1996 phba->work_hba_events |= WORKER_DISC_TMO; 2496 vport->work_port_events |= WORKER_DISC_TMO;
2497 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2498
2499 spin_lock_irqsave(&phba->hbalock, flags);
1997 if (phba->work_wait) 2500 if (phba->work_wait)
1998 wake_up(phba->work_wait); 2501 lpfc_worker_wake_up(phba);
2502 spin_unlock_irqrestore(&phba->hbalock, flags);
1999 } 2503 }
2000 spin_unlock_irqrestore(phba->host->host_lock, flags);
2001 return; 2504 return;
2002} 2505}
2003 2506
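lpfc_disc_timeout() above runs in timer context, so it only records WORKER_DISC_TMO under the port lock and wakes the worker thread; the heavy lifting is deferred to lpfc_disc_timeout_handler() below. A condensed sketch of that defer-to-worker pattern, assuming a kernel-module context; demo_port and DEMO_DISC_TMO are illustrative names, not lpfc structures.

#include <linux/spinlock.h>
#include <linux/wait.h>

#define DEMO_DISC_TMO	0x1

struct demo_port {
	spinlock_t lock;
	unsigned long events;			/* pending work bits */
	wait_queue_head_t *worker_wait;		/* NULL if no worker sleeps */
};

/* Timer callback: mark the event and poke the worker, nothing more. */
static void demo_disc_timeout(struct demo_port *p)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	if (!(p->events & DEMO_DISC_TMO)) {
		p->events |= DEMO_DISC_TMO;
		if (p->worker_wait)
			wake_up(p->worker_wait);
	}
	spin_unlock_irqrestore(&p->lock, flags);
}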
2004static void 2507static void
2005lpfc_disc_timeout_handler(struct lpfc_hba *phba) 2508lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2006{ 2509{
2007 struct lpfc_sli *psli; 2510 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2511 struct lpfc_hba *phba = vport->phba;
2512 struct lpfc_sli *psli = &phba->sli;
2008 struct lpfc_nodelist *ndlp, *next_ndlp; 2513 struct lpfc_nodelist *ndlp, *next_ndlp;
2009 LPFC_MBOXQ_t *clearlambox, *initlinkmbox; 2514 LPFC_MBOXQ_t *initlinkmbox;
2010 int rc, clrlaerr = 0; 2515 int rc, clrlaerr = 0;
2011 2516
2012 if (unlikely(!phba)) 2517 if (!(vport->fc_flag & FC_DISC_TMO))
2013 return; 2518 return;
2014 2519
2015 if (!(phba->fc_flag & FC_DISC_TMO)) 2520 spin_lock_irq(shost->host_lock);
2016 return; 2521 vport->fc_flag &= ~FC_DISC_TMO;
2522 spin_unlock_irq(shost->host_lock);
2017 2523
2018 psli = &phba->sli; 2524 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2525 "disc timeout: state:x%x rtry:x%x flg:x%x",
2526 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
2019 2527
2020 spin_lock_irq(phba->host->host_lock); 2528 switch (vport->port_state) {
2021 phba->fc_flag &= ~FC_DISC_TMO;
2022 spin_unlock_irq(phba->host->host_lock);
2023
2024 switch (phba->hba_state) {
2025 2529
2026 case LPFC_LOCAL_CFG_LINK: 2530 case LPFC_LOCAL_CFG_LINK:
2027 /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */ 2531 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
2028 /* FAN timeout */ 2532 * FAN
2029 lpfc_printf_log(phba, 2533 */
2030 KERN_WARNING, 2534 /* FAN timeout */
2031 LOG_DISCOVERY, 2535 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
2032 "%d:0221 FAN timeout\n", 2536 "%d (%d):0221 FAN timeout\n",
2033 phba->brd_no); 2537 phba->brd_no, vport->vpi);
2034 2538
2035 /* Start discovery by sending FLOGI, clean up old rpis */ 2539 /* Start discovery by sending FLOGI, clean up old rpis */
2036 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, 2540 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2037 nlp_listp) { 2541 nlp_listp) {
2038 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 2542 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2039 continue; 2543 continue;
2040 if (ndlp->nlp_type & NLP_FABRIC) { 2544 if (ndlp->nlp_type & NLP_FABRIC) {
2041 /* Clean up the ndlp on Fabric connections */ 2545 /* Clean up the ndlp on Fabric connections */
2042 lpfc_drop_node(phba, ndlp); 2546 lpfc_drop_node(vport, ndlp);
2043 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 2547 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2044 /* Fail outstanding IO now since device 2548 /* Fail outstanding IO now since device
2045 * is marked for PLOGI. 2549 * is marked for PLOGI.
2046 */ 2550 */
2047 lpfc_unreg_rpi(phba, ndlp); 2551 lpfc_unreg_rpi(vport, ndlp);
2048 } 2552 }
2049 } 2553 }
2050 phba->hba_state = LPFC_FLOGI; 2554 if (vport->port_state != LPFC_FLOGI) {
2051 lpfc_set_disctmo(phba); 2555 vport->port_state = LPFC_FLOGI;
2052 lpfc_initial_flogi(phba); 2556 lpfc_set_disctmo(vport);
2557 lpfc_initial_flogi(vport);
2558 }
2053 break; 2559 break;
2054 2560
2561 case LPFC_FDISC:
2055 case LPFC_FLOGI: 2562 case LPFC_FLOGI:
2056 /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ 2563 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2057 /* Initial FLOGI timeout */ 2564 /* Initial FLOGI timeout */
2058 lpfc_printf_log(phba, 2565 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2059 KERN_ERR, 2566 "%d (%d):0222 Initial %s timeout\n",
2060 LOG_DISCOVERY, 2567 phba->brd_no, vport->vpi,
2061 "%d:0222 Initial FLOGI timeout\n", 2568 vport->vpi ? "FLOGI" : "FDISC");
2062 phba->brd_no);
2063 2569
2064 /* Assume no Fabric and go on with discovery. 2570 /* Assume no Fabric and go on with discovery.
2065 * Check for outstanding ELS FLOGI to abort. 2571 * Check for outstanding ELS FLOGI to abort.
2066 */ 2572 */
2067 2573
2068 /* FLOGI failed, so just use loop map to make discovery list */ 2574 /* FLOGI failed, so just use loop map to make discovery list */
2069 lpfc_disc_list_loopmap(phba); 2575 lpfc_disc_list_loopmap(vport);
2070 2576
2071 /* Start discovery */ 2577 /* Start discovery */
2072 lpfc_disc_start(phba); 2578 lpfc_disc_start(vport);
2073 break; 2579 break;
2074 2580
2075 case LPFC_FABRIC_CFG_LINK: 2581 case LPFC_FABRIC_CFG_LINK:
2076 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for 2582 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2077 NameServer login */ 2583 NameServer login */
2078 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2584 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2079 "%d:0223 Timeout while waiting for NameServer " 2585 "%d (%d):0223 Timeout while waiting for "
2080 "login\n", phba->brd_no); 2586 "NameServer login\n",
2587 phba->brd_no, vport->vpi);
2081 2588
2082 /* Next look for NameServer ndlp */ 2589 /* Next look for NameServer ndlp */
2083 ndlp = lpfc_findnode_did(phba, NameServer_DID); 2590 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2084 if (ndlp) 2591 if (ndlp)
2085 lpfc_nlp_put(ndlp); 2592 lpfc_nlp_put(ndlp);
2086 /* Start discovery */ 2593 /* Start discovery */
2087 lpfc_disc_start(phba); 2594 lpfc_disc_start(vport);
2088 break; 2595 break;
2089 2596
2090 case LPFC_NS_QRY: 2597 case LPFC_NS_QRY:
2091 /* Check for wait for NameServer Rsp timeout */ 2598 /* Check for wait for NameServer Rsp timeout */
2092 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2599 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2093 "%d:0224 NameServer Query timeout " 2600 "%d (%d):0224 NameServer Query timeout "
2094 "Data: x%x x%x\n", 2601 "Data: x%x x%x\n",
2095 phba->brd_no, 2602 phba->brd_no, vport->vpi,
2096 phba->fc_ns_retry, LPFC_MAX_NS_RETRY); 2603 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2097 2604
2098 ndlp = lpfc_findnode_did(phba, NameServer_DID); 2605 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2099 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 2606 /* Try it one more time */
2100 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) { 2607 vport->fc_ns_retry++;
2101 /* Try it one more time */ 2608 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2102 rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT); 2609 vport->fc_ns_retry, 0);
2103 if (rc == 0) 2610 if (rc == 0)
2104 break; 2611 break;
2105 }
2106 phba->fc_ns_retry = 0;
2107 } 2612 }
2613 vport->fc_ns_retry = 0;
2108 2614
2109 /* Nothing to authenticate, so CLEAR_LA right now */ 2615 /*
2110 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2616 * Discovery is over.
2111 if (!clearlambox) { 2617 * set port_state to PORT_READY if SLI2.
2112 clrlaerr = 1; 2618 * cmpl_reg_vpi will set port_state to READY for SLI3.
2113 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2619 */
2114 "%d:0226 Device Discovery " 2620 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2115 "completion error\n", 2621 lpfc_issue_reg_vpi(phba, vport);
2116 phba->brd_no); 2622 else { /* NPIV Not enabled */
2117 phba->hba_state = LPFC_HBA_ERROR; 2623 lpfc_issue_clear_la(phba, vport);
2118 break; 2624 vport->port_state = LPFC_VPORT_READY;
2119 }
2120
2121 phba->hba_state = LPFC_CLEAR_LA;
2122 lpfc_clear_la(phba, clearlambox);
2123 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2124 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2125 (MBX_NOWAIT | MBX_STOP_IOCB));
2126 if (rc == MBX_NOT_FINISHED) {
2127 mempool_free(clearlambox, phba->mbox_mem_pool);
2128 clrlaerr = 1;
2129 break;
2130 } 2625 }
2131 2626
2132 /* Setup and issue mailbox INITIALIZE LINK command */ 2627 /* Setup and issue mailbox INITIALIZE LINK command */
2133 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2628 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2134 if (!initlinkmbox) { 2629 if (!initlinkmbox) {
2135 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2630 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2136 "%d:0206 Device Discovery " 2631 "%d (%d):0206 Device Discovery "
2137 "completion error\n", 2632 "completion error\n",
2138 phba->brd_no); 2633 phba->brd_no, vport->vpi);
2139 phba->hba_state = LPFC_HBA_ERROR; 2634 phba->link_state = LPFC_HBA_ERROR;
2140 break; 2635 break;
2141 } 2636 }
2142 2637
@@ -2144,6 +2639,8 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2144 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 2639 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2145 phba->cfg_link_speed); 2640 phba->cfg_link_speed);
2146 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 2641 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2642 initlinkmbox->vport = vport;
2643 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2147 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, 2644 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2148 (MBX_NOWAIT | MBX_STOP_IOCB)); 2645 (MBX_NOWAIT | MBX_STOP_IOCB));
2149 lpfc_set_loopback_flag(phba); 2646 lpfc_set_loopback_flag(phba);
@@ -2154,67 +2651,81 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2154 2651
2155 case LPFC_DISC_AUTH: 2652 case LPFC_DISC_AUTH:
2156 /* Node Authentication timeout */ 2653 /* Node Authentication timeout */
2157 lpfc_printf_log(phba, 2654 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2158 KERN_ERR, 2655 "%d (%d):0227 Node Authentication timeout\n",
2159 LOG_DISCOVERY, 2656 phba->brd_no, vport->vpi);
2160 "%d:0227 Node Authentication timeout\n", 2657 lpfc_disc_flush_list(vport);
2161 phba->brd_no); 2658
2162 lpfc_disc_flush_list(phba); 2659 /*
2163 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2660 * set port_state to PORT_READY if SLI2.
2164 if (!clearlambox) { 2661 * cmpl_reg_vpi will set port_state to READY for SLI3.
2165 clrlaerr = 1; 2662 */
2166 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2663 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2167 "%d:0207 Device Discovery " 2664 lpfc_issue_reg_vpi(phba, vport);
2168 "completion error\n", 2665 else { /* NPIV Not enabled */
2169 phba->brd_no); 2666 lpfc_issue_clear_la(phba, vport);
2170 phba->hba_state = LPFC_HBA_ERROR; 2667 vport->port_state = LPFC_VPORT_READY;
2171 break;
2172 } 2668 }
2173 phba->hba_state = LPFC_CLEAR_LA; 2669 break;
2174 lpfc_clear_la(phba, clearlambox); 2670
2175 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 2671 case LPFC_VPORT_READY:
2176 rc = lpfc_sli_issue_mbox(phba, clearlambox, 2672 if (vport->fc_flag & FC_RSCN_MODE) {
2177 (MBX_NOWAIT | MBX_STOP_IOCB)); 2673 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2178 if (rc == MBX_NOT_FINISHED) { 2674 "%d (%d):0231 RSCN timeout Data: x%x "
2179 mempool_free(clearlambox, phba->mbox_mem_pool); 2675 "x%x\n",
2180 clrlaerr = 1; 2676 phba->brd_no, vport->vpi,
2677 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2678
2679 /* Cleanup any outstanding ELS commands */
2680 lpfc_els_flush_cmd(vport);
2681
2682 lpfc_els_flush_rscn(vport);
2683 lpfc_disc_flush_list(vport);
2181 } 2684 }
2182 break; 2685 break;
2183 2686
2687 default:
2688 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2689 "%d (%d):0229 Unexpected discovery timeout, "
2690 "vport State x%x\n",
2691 phba->brd_no, vport->vpi, vport->port_state);
2692
2693 break;
2694 }
2695
2696 switch (phba->link_state) {
2184 case LPFC_CLEAR_LA: 2697 case LPFC_CLEAR_LA:
2185 /* CLEAR LA timeout */ 2698 /* CLEAR LA timeout */
2186 lpfc_printf_log(phba, 2699 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2187 KERN_ERR, 2700 "%d (%d):0228 CLEAR LA timeout\n",
2188 LOG_DISCOVERY, 2701 phba->brd_no, vport->vpi);
2189 "%d:0228 CLEAR LA timeout\n",
2190 phba->brd_no);
2191 clrlaerr = 1; 2702 clrlaerr = 1;
2192 break; 2703 break;
2193 2704
2194 case LPFC_HBA_READY: 2705 case LPFC_LINK_UNKNOWN:
2195 if (phba->fc_flag & FC_RSCN_MODE) { 2706 case LPFC_WARM_START:
2196 lpfc_printf_log(phba, 2707 case LPFC_INIT_START:
2197 KERN_ERR, 2708 case LPFC_INIT_MBX_CMDS:
2198 LOG_DISCOVERY, 2709 case LPFC_LINK_DOWN:
2199 "%d:0231 RSCN timeout Data: x%x x%x\n", 2710 case LPFC_LINK_UP:
2200 phba->brd_no, 2711 case LPFC_HBA_ERROR:
2201 phba->fc_ns_retry, LPFC_MAX_NS_RETRY); 2712 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2202 2713 "%d (%d):0230 Unexpected timeout, hba link "
2203 /* Cleanup any outstanding ELS commands */ 2714 "state x%x\n",
2204 lpfc_els_flush_cmd(phba); 2715 phba->brd_no, vport->vpi, phba->link_state);
2716 clrlaerr = 1;
2717 break;
2205 2718
2206 lpfc_els_flush_rscn(phba); 2719 case LPFC_HBA_READY:
2207 lpfc_disc_flush_list(phba);
2208 }
2209 break; 2720 break;
2210 } 2721 }
2211 2722
2212 if (clrlaerr) { 2723 if (clrlaerr) {
2213 lpfc_disc_flush_list(phba); 2724 lpfc_disc_flush_list(vport);
2214 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2725 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2215 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2726 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2216 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2727 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2217 phba->hba_state = LPFC_HBA_READY; 2728 vport->port_state = LPFC_VPORT_READY;
2218 } 2729 }
2219 2730
2220 return; 2731 return;
@@ -2227,37 +2738,29 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2227 * handed off to the SLI layer. 2738 * handed off to the SLI layer.
2228 */ 2739 */
2229void 2740void
2230lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 2741lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2231{ 2742{
2232 struct lpfc_sli *psli; 2743 MAILBOX_t *mb = &pmb->mb;
2233 MAILBOX_t *mb; 2744 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2234 struct lpfc_dmabuf *mp; 2745 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2235 struct lpfc_nodelist *ndlp; 2746 struct lpfc_vport *vport = pmb->vport;
2236
2237 psli = &phba->sli;
2238 mb = &pmb->mb;
2239
2240 ndlp = (struct lpfc_nodelist *) pmb->context2;
2241 mp = (struct lpfc_dmabuf *) (pmb->context1);
2242 2747
2243 pmb->context1 = NULL; 2748 pmb->context1 = NULL;
2244 2749
2245 ndlp->nlp_rpi = mb->un.varWords[0]; 2750 ndlp->nlp_rpi = mb->un.varWords[0];
2246 ndlp->nlp_type |= NLP_FABRIC; 2751 ndlp->nlp_type |= NLP_FABRIC;
2247 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 2752 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2248 2753
2249 /* Start issuing Fabric-Device Management Interface (FDMI) 2754 /*
2250 * command to 0xfffffa (FDMI well known port) 2755 * Start issuing Fabric-Device Management Interface (FDMI) command to
 2756	 * 0xfffffa (FDMI well known port) or delay issuing FDMI command if
 2757	 * fdmi-on=2 (supporting RPA/hostname)
2251 */ 2758 */
2252 if (phba->cfg_fdmi_on == 1) { 2759
2253 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA); 2760 if (phba->cfg_fdmi_on == 1)
2254 } else { 2761 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
2255 /* 2762 else
2256 * Delay issuing FDMI command if fdmi-on=2 2763 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
 2257	 * (supporting RPA/hostname)
2258 */
2259 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
2260 }
2261 2764
2262 /* Mailbox took a reference to the node */ 2765 /* Mailbox took a reference to the node */
2263 lpfc_nlp_put(ndlp); 2766 lpfc_nlp_put(ndlp);
@@ -2283,16 +2786,12 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2283 sizeof(ndlp->nlp_portname)) == 0; 2786 sizeof(ndlp->nlp_portname)) == 0;
2284} 2787}
2285 2788
2286/*
2287 * Search node lists for a remote port matching filter criteria
2288 * Caller needs to hold host_lock before calling this routine.
2289 */
2290struct lpfc_nodelist * 2789struct lpfc_nodelist *
2291__lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param) 2790__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2292{ 2791{
2293 struct lpfc_nodelist *ndlp; 2792 struct lpfc_nodelist *ndlp;
2294 2793
2295 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 2794 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2296 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE && 2795 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
2297 filter(ndlp, param)) 2796 filter(ndlp, param))
2298 return ndlp; 2797 return ndlp;
@@ -2302,68 +2801,104 @@ __lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
2302 2801
2303/* 2802/*
2304 * Search node lists for a remote port matching filter criteria 2803 * Search node lists for a remote port matching filter criteria
2305 * This routine is used when the caller does NOT have host_lock. 2804 * Caller needs to hold host_lock before calling this routine.
2306 */ 2805 */
2307struct lpfc_nodelist * 2806struct lpfc_nodelist *
2308lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param) 2807lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2309{ 2808{
2809 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2310 struct lpfc_nodelist *ndlp; 2810 struct lpfc_nodelist *ndlp;
2311 2811
2312 spin_lock_irq(phba->host->host_lock); 2812 spin_lock_irq(shost->host_lock);
2313 ndlp = __lpfc_find_node(phba, filter, param); 2813 ndlp = __lpfc_find_node(vport, filter, param);
2314 spin_unlock_irq(phba->host->host_lock); 2814 spin_unlock_irq(shost->host_lock);
2315 return ndlp; 2815 return ndlp;
2316} 2816}
2317 2817
2318/* 2818/*
2319 * This routine looks up the ndlp lists for the given RPI. If rpi found it 2819 * This routine looks up the ndlp lists for the given RPI. If rpi found it
2320 * returns the node list pointer else return NULL. 2820 * returns the node list element pointer else return NULL.
2321 */ 2821 */
2322struct lpfc_nodelist * 2822struct lpfc_nodelist *
2323__lpfc_findnode_rpi(struct lpfc_hba *phba, uint16_t rpi) 2823__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2324{ 2824{
2325 return __lpfc_find_node(phba, lpfc_filter_by_rpi, &rpi); 2825 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
2326} 2826}
2327 2827
2328struct lpfc_nodelist * 2828struct lpfc_nodelist *
2329lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi) 2829lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2330{ 2830{
2831 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2331 struct lpfc_nodelist *ndlp; 2832 struct lpfc_nodelist *ndlp;
2332 2833
2333 spin_lock_irq(phba->host->host_lock); 2834 spin_lock_irq(shost->host_lock);
2334 ndlp = __lpfc_findnode_rpi(phba, rpi); 2835 ndlp = __lpfc_findnode_rpi(vport, rpi);
2335 spin_unlock_irq(phba->host->host_lock); 2836 spin_unlock_irq(shost->host_lock);
2336 return ndlp; 2837 return ndlp;
2337} 2838}
2338 2839
2339/* 2840/*
2340 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it 2841 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
2341 * returns the node list pointer else return NULL. 2842 * returns the node element list pointer else return NULL.
2342 */ 2843 */
2343struct lpfc_nodelist * 2844struct lpfc_nodelist *
2344lpfc_findnode_wwpn(struct lpfc_hba *phba, struct lpfc_name *wwpn) 2845lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
2345{ 2846{
2847 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2346 struct lpfc_nodelist *ndlp; 2848 struct lpfc_nodelist *ndlp;
2347 2849
2348 spin_lock_irq(phba->host->host_lock); 2850 spin_lock_irq(shost->host_lock);
2349 ndlp = __lpfc_find_node(phba, lpfc_filter_by_wwpn, wwpn); 2851 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
2350 spin_unlock_irq(phba->host->host_lock); 2852 spin_unlock_irq(shost->host_lock);
2351 return NULL; 2853 return ndlp;
2352} 2854}
2353 2855
2354void 2856void
2355lpfc_nlp_init(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) 2857lpfc_dev_loss_delay(unsigned long ptr)
2858{
2859 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2860 struct lpfc_vport *vport = ndlp->vport;
2861 struct lpfc_hba *phba = vport->phba;
2862 struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
2863 unsigned long flags;
2864
2865 evtp = &ndlp->dev_loss_evt;
2866
2867 spin_lock_irqsave(&phba->hbalock, flags);
2868 if (!list_empty(&evtp->evt_listp)) {
2869 spin_unlock_irqrestore(&phba->hbalock, flags);
2870 return;
2871 }
2872
2873 evtp->evt_arg1 = ndlp;
2874 evtp->evt = LPFC_EVT_DEV_LOSS_DELAY;
2875 list_add_tail(&evtp->evt_listp, &phba->work_list);
2876 if (phba->work_wait)
2877 lpfc_worker_wake_up(phba);
2878 spin_unlock_irqrestore(&phba->hbalock, flags);
2879 return;
2880}
2881
2882void
2883lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2884 uint32_t did)
2356{ 2885{
2357 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 2886 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2358 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 2887 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2888 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
2359 init_timer(&ndlp->nlp_delayfunc); 2889 init_timer(&ndlp->nlp_delayfunc);
2360 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; 2890 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2361 ndlp->nlp_delayfunc.data = (unsigned long)ndlp; 2891 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2362 ndlp->nlp_DID = did; 2892 ndlp->nlp_DID = did;
2363 ndlp->nlp_phba = phba; 2893 ndlp->vport = vport;
2364 ndlp->nlp_sid = NLP_NO_SID; 2894 ndlp->nlp_sid = NLP_NO_SID;
2365 INIT_LIST_HEAD(&ndlp->nlp_listp); 2895 INIT_LIST_HEAD(&ndlp->nlp_listp);
2366 kref_init(&ndlp->kref); 2896 kref_init(&ndlp->kref);
2897
2898 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2899 "node init: did:x%x",
2900 ndlp->nlp_DID, 0, 0);
2901
2367 return; 2902 return;
2368} 2903}
2369 2904
@@ -2372,8 +2907,13 @@ lpfc_nlp_release(struct kref *kref)
2372{ 2907{
2373 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, 2908 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2374 kref); 2909 kref);
2375 lpfc_nlp_remove(ndlp->nlp_phba, ndlp); 2910
2376 mempool_free(ndlp, ndlp->nlp_phba->nlp_mem_pool); 2911 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2912 "node release: did:x%x flg:x%x type:x%x",
2913 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2914
2915 lpfc_nlp_remove(ndlp->vport, ndlp);
2916 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
2377} 2917}
2378 2918
2379struct lpfc_nodelist * 2919struct lpfc_nodelist *
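Node lifetime in this file is reference counted: lpfc_nlp_init() calls kref_init(), users take and drop references with lpfc_nlp_get()/lpfc_nlp_put(), and lpfc_nlp_release() above runs only when the last reference goes away, tearing the node down and returning it to the mempool. A bare-bones sketch of the kref pattern; the demo_node type and functions are illustrative only.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mempool.h>

struct demo_node {
	struct kref kref;
	mempool_t *pool;		/* pool the node was allocated from */
};

static void demo_node_init(struct demo_node *np, mempool_t *pool)
{
	np->pool = pool;
	kref_init(&np->kref);		/* refcount starts at 1 */
}

static void demo_node_release(struct kref *kref)
{
	struct demo_node *np = container_of(kref, struct demo_node, kref);

	/* node tear-down work would go here */
	mempool_free(np, np->pool);
}

static struct demo_node *demo_node_get(struct demo_node *np)
{
	kref_get(&np->kref);
	return np;
}

static void demo_node_put(struct demo_node *np)
{
	kref_put(&np->kref, demo_node_release);	/* may invoke release */
}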
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 2623a9bc7775..c2fb59f595f3 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -59,6 +59,12 @@
59#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24 59#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
60#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32 60#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
61 61
62#define SLI2_IOCB_CMD_SIZE 32
63#define SLI2_IOCB_RSP_SIZE 32
64#define SLI3_IOCB_CMD_SIZE 128
65#define SLI3_IOCB_RSP_SIZE 64
66
67
62/* Common Transport structures and definitions */ 68/* Common Transport structures and definitions */
63 69
64union CtRevisionId { 70union CtRevisionId {
@@ -79,6 +85,9 @@ union CtCommandResponse {
79 uint32_t word; 85 uint32_t word;
80}; 86};
81 87
88#define FC4_FEATURE_INIT 0x2
89#define FC4_FEATURE_TARGET 0x1
90
82struct lpfc_sli_ct_request { 91struct lpfc_sli_ct_request {
83 /* Structure is in Big Endian format */ 92 /* Structure is in Big Endian format */
84 union CtRevisionId RevisionId; 93 union CtRevisionId RevisionId;
@@ -121,20 +130,6 @@ struct lpfc_sli_ct_request {
121 130
122 uint32_t rsvd[7]; 131 uint32_t rsvd[7];
123 } rft; 132 } rft;
124 struct rff {
125 uint32_t PortId;
126 uint8_t reserved[2];
127#ifdef __BIG_ENDIAN_BITFIELD
128 uint8_t feature_res:6;
129 uint8_t feature_init:1;
130 uint8_t feature_tgt:1;
131#else /* __LITTLE_ENDIAN_BITFIELD */
132 uint8_t feature_tgt:1;
133 uint8_t feature_init:1;
134 uint8_t feature_res:6;
135#endif
136 uint8_t type_code; /* type=8 for FCP */
137 } rff;
138 struct rnn { 133 struct rnn {
139 uint32_t PortId; /* For RNN_ID requests */ 134 uint32_t PortId; /* For RNN_ID requests */
140 uint8_t wwnn[8]; 135 uint8_t wwnn[8];
@@ -144,15 +139,42 @@ struct lpfc_sli_ct_request {
144 uint8_t len; 139 uint8_t len;
145 uint8_t symbname[255]; 140 uint8_t symbname[255];
146 } rsnn; 141 } rsnn;
142 struct rspn { /* For RSPN_ID requests */
143 uint32_t PortId;
144 uint8_t len;
145 uint8_t symbname[255];
146 } rspn;
147 struct gff {
148 uint32_t PortId;
149 } gff;
150 struct gff_acc {
151 uint8_t fbits[128];
152 } gff_acc;
153#define FCP_TYPE_FEATURE_OFFSET 4
154 struct rff {
155 uint32_t PortId;
156 uint8_t reserved[2];
157 uint8_t fbits;
158 uint8_t type_code; /* type=8 for FCP */
159 } rff;
147 } un; 160 } un;
148}; 161};
149 162
150#define SLI_CT_REVISION 1 163#define SLI_CT_REVISION 1
151#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260) 164#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
152#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228) 165 sizeof(struct gid))
153#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235) 166#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
154#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252) 167 sizeof(struct gff))
155#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request)) 168#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
169 sizeof(struct rft))
170#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
171 sizeof(struct rff))
172#define RNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
173 sizeof(struct rnn))
174#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
175 sizeof(struct rsnn))
176#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
177 sizeof(struct rspn))
156 178
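The request-size macros above change from "sizeof the whole request minus a magic constant" to "offsetof(struct lpfc_sli_ct_request, un) plus the size of the specific union member", which yields the common CT header plus exactly one command's payload and stays correct if the union grows. A small stand-alone illustration of that arithmetic; the demo_* structure is made up, and only the offsetof + sizeof idiom mirrors the macros.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_gid  { uint32_t port_id; };				/* small payload */
struct demo_rsnn { uint32_t port_id; uint8_t sym[255]; };	/* large payload */

struct demo_ct_req {
	uint32_t header[4];		/* common CT header */
	union {
		struct demo_gid  gid;
		struct demo_rsnn rsnn;
	} un;
};

/* header + exactly this command's payload, independent of the union size */
#define DEMO_GID_REQ_SZ	 (offsetof(struct demo_ct_req, un) + sizeof(struct demo_gid))
#define DEMO_RSNN_REQ_SZ (offsetof(struct demo_ct_req, un) + sizeof(struct demo_rsnn))

int main(void)
{
	printf("gid=%zu rsnn=%zu whole=%zu\n",
	       (size_t)DEMO_GID_REQ_SZ, (size_t)DEMO_RSNN_REQ_SZ,
	       sizeof(struct demo_ct_req));
	return 0;
}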
157/* 179/*
158 * FsType Definitions 180 * FsType Definitions
@@ -227,6 +249,7 @@ struct lpfc_sli_ct_request {
227#define SLI_CTNS_GFT_ID 0x0117 249#define SLI_CTNS_GFT_ID 0x0117
228#define SLI_CTNS_GSPN_ID 0x0118 250#define SLI_CTNS_GSPN_ID 0x0118
229#define SLI_CTNS_GPT_ID 0x011A 251#define SLI_CTNS_GPT_ID 0x011A
252#define SLI_CTNS_GFF_ID 0x011F
230#define SLI_CTNS_GID_PN 0x0121 253#define SLI_CTNS_GID_PN 0x0121
231#define SLI_CTNS_GID_NN 0x0131 254#define SLI_CTNS_GID_NN 0x0131
232#define SLI_CTNS_GIP_NN 0x0135 255#define SLI_CTNS_GIP_NN 0x0135
@@ -240,9 +263,9 @@ struct lpfc_sli_ct_request {
240#define SLI_CTNS_RNN_ID 0x0213 263#define SLI_CTNS_RNN_ID 0x0213
241#define SLI_CTNS_RCS_ID 0x0214 264#define SLI_CTNS_RCS_ID 0x0214
242#define SLI_CTNS_RFT_ID 0x0217 265#define SLI_CTNS_RFT_ID 0x0217
243#define SLI_CTNS_RFF_ID 0x021F
244#define SLI_CTNS_RSPN_ID 0x0218 266#define SLI_CTNS_RSPN_ID 0x0218
245#define SLI_CTNS_RPT_ID 0x021A 267#define SLI_CTNS_RPT_ID 0x021A
268#define SLI_CTNS_RFF_ID 0x021F
246#define SLI_CTNS_RIP_NN 0x0235 269#define SLI_CTNS_RIP_NN 0x0235
247#define SLI_CTNS_RIPA_NN 0x0236 270#define SLI_CTNS_RIPA_NN 0x0236
248#define SLI_CTNS_RSNN_NN 0x0239 271#define SLI_CTNS_RSNN_NN 0x0239
@@ -311,9 +334,9 @@ struct csp {
311 uint8_t bbCreditlsb; /* FC Word 0, byte 3 */ 334 uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
312 335
313#ifdef __BIG_ENDIAN_BITFIELD 336#ifdef __BIG_ENDIAN_BITFIELD
314 uint16_t increasingOffset:1; /* FC Word 1, bit 31 */ 337 uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
315 uint16_t randomOffset:1; /* FC Word 1, bit 30 */ 338 uint16_t randomOffset:1; /* FC Word 1, bit 30 */
316 uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */ 339 uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
317 uint16_t fPort:1; /* FC Word 1, bit 28 */ 340 uint16_t fPort:1; /* FC Word 1, bit 28 */
318 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ 341 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
319 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ 342 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
@@ -332,9 +355,9 @@ struct csp {
332 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ 355 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
333 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ 356 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
334 uint16_t fPort:1; /* FC Word 1, bit 28 */ 357 uint16_t fPort:1; /* FC Word 1, bit 28 */
335 uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */ 358 uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
336 uint16_t randomOffset:1; /* FC Word 1, bit 30 */ 359 uint16_t randomOffset:1; /* FC Word 1, bit 30 */
337 uint16_t increasingOffset:1; /* FC Word 1, bit 31 */ 360 uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
338 361
339 uint16_t payloadlength:1; /* FC Word 1, bit 16 */ 362 uint16_t payloadlength:1; /* FC Word 1, bit 16 */
340 uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ 363 uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
@@ -1255,7 +1278,9 @@ typedef struct { /* FireFly BIU registers */
1255#define MBX_KILL_BOARD 0x24 1278#define MBX_KILL_BOARD 0x24
1256#define MBX_CONFIG_FARP 0x25 1279#define MBX_CONFIG_FARP 0x25
1257#define MBX_BEACON 0x2A 1280#define MBX_BEACON 0x2A
1281#define MBX_HEARTBEAT 0x31
1258 1282
1283#define MBX_CONFIG_HBQ 0x7C
1259#define MBX_LOAD_AREA 0x81 1284#define MBX_LOAD_AREA 0x81
1260#define MBX_RUN_BIU_DIAG64 0x84 1285#define MBX_RUN_BIU_DIAG64 0x84
1261#define MBX_CONFIG_PORT 0x88 1286#define MBX_CONFIG_PORT 0x88
@@ -1263,6 +1288,10 @@ typedef struct { /* FireFly BIU registers */
1263#define MBX_READ_RPI64 0x8F 1288#define MBX_READ_RPI64 0x8F
1264#define MBX_REG_LOGIN64 0x93 1289#define MBX_REG_LOGIN64 0x93
1265#define MBX_READ_LA64 0x95 1290#define MBX_READ_LA64 0x95
1291#define MBX_REG_VPI 0x96
1292#define MBX_UNREG_VPI 0x97
1293#define MBX_REG_VNPID 0x96
1294#define MBX_UNREG_VNPID 0x97
1266 1295
1267#define MBX_FLASH_WR_ULA 0x98 1296#define MBX_FLASH_WR_ULA 0x98
1268#define MBX_SET_DEBUG 0x99 1297#define MBX_SET_DEBUG 0x99
@@ -1335,6 +1364,10 @@ typedef struct { /* FireFly BIU registers */
1335#define CMD_FCP_TRECEIVE64_CX 0xA1 1364#define CMD_FCP_TRECEIVE64_CX 0xA1
1336#define CMD_FCP_TRSP64_CX 0xA3 1365#define CMD_FCP_TRSP64_CX 0xA3
1337 1366
1367#define CMD_IOCB_RCV_SEQ64_CX 0xB5
1368#define CMD_IOCB_RCV_ELS64_CX 0xB7
1369#define CMD_IOCB_RCV_CONT64_CX 0xBB
1370
1338#define CMD_GEN_REQUEST64_CR 0xC2 1371#define CMD_GEN_REQUEST64_CR 0xC2
1339#define CMD_GEN_REQUEST64_CX 0xC3 1372#define CMD_GEN_REQUEST64_CX 0xC3
1340 1373
@@ -1561,6 +1594,7 @@ typedef struct {
1561#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */ 1594#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
1562#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */ 1595#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
1563#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */ 1596#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
1597#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */
1564#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */ 1598#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
1565 1599
1566#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */ 1600#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
@@ -1744,8 +1778,6 @@ typedef struct {
1744#define LMT_4Gb 0x040 1778#define LMT_4Gb 0x040
1745#define LMT_8Gb 0x080 1779#define LMT_8Gb 0x080
1746#define LMT_10Gb 0x100 1780#define LMT_10Gb 0x100
1747
1748
1749 uint32_t rsvd2; 1781 uint32_t rsvd2;
1750 uint32_t rsvd3; 1782 uint32_t rsvd3;
1751 uint32_t max_xri; 1783 uint32_t max_xri;
@@ -1754,7 +1786,10 @@ typedef struct {
1754 uint32_t avail_xri; 1786 uint32_t avail_xri;
1755 uint32_t avail_iocb; 1787 uint32_t avail_iocb;
1756 uint32_t avail_rpi; 1788 uint32_t avail_rpi;
1757 uint32_t default_rpi; 1789 uint32_t max_vpi;
1790 uint32_t rsvd4;
1791 uint32_t rsvd5;
1792 uint32_t avail_vpi;
1758} READ_CONFIG_VAR; 1793} READ_CONFIG_VAR;
1759 1794
1760/* Structure for MB Command READ_RCONFIG (12) */ 1795/* Structure for MB Command READ_RCONFIG (12) */
@@ -1818,6 +1853,13 @@ typedef struct {
1818 structure */ 1853 structure */
1819 struct ulp_bde64 sp64; 1854 struct ulp_bde64 sp64;
1820 } un; 1855 } un;
1856#ifdef __BIG_ENDIAN_BITFIELD
1857 uint16_t rsvd3;
1858 uint16_t vpi;
1859#else /* __LITTLE_ENDIAN_BITFIELD */
1860 uint16_t vpi;
1861 uint16_t rsvd3;
1862#endif
1821} READ_SPARM_VAR; 1863} READ_SPARM_VAR;
1822 1864
1823/* Structure for MB Command READ_STATUS (14) */ 1865/* Structure for MB Command READ_STATUS (14) */
@@ -1918,11 +1960,17 @@ typedef struct {
1918#ifdef __BIG_ENDIAN_BITFIELD 1960#ifdef __BIG_ENDIAN_BITFIELD
1919 uint32_t cv:1; 1961 uint32_t cv:1;
1920 uint32_t rr:1; 1962 uint32_t rr:1;
1921 uint32_t rsvd1:29; 1963 uint32_t rsvd2:2;
1964 uint32_t v3req:1;
1965 uint32_t v3rsp:1;
1966 uint32_t rsvd1:25;
1922 uint32_t rv:1; 1967 uint32_t rv:1;
1923#else /* __LITTLE_ENDIAN_BITFIELD */ 1968#else /* __LITTLE_ENDIAN_BITFIELD */
1924 uint32_t rv:1; 1969 uint32_t rv:1;
1925 uint32_t rsvd1:29; 1970 uint32_t rsvd1:25;
1971 uint32_t v3rsp:1;
1972 uint32_t v3req:1;
1973 uint32_t rsvd2:2;
1926 uint32_t rr:1; 1974 uint32_t rr:1;
1927 uint32_t cv:1; 1975 uint32_t cv:1;
1928#endif 1976#endif
@@ -1972,8 +2020,8 @@ typedef struct {
1972 uint8_t sli1FwName[16]; 2020 uint8_t sli1FwName[16];
1973 uint32_t sli2FwRev; 2021 uint32_t sli2FwRev;
1974 uint8_t sli2FwName[16]; 2022 uint8_t sli2FwName[16];
1975 uint32_t rsvd2; 2023 uint32_t sli3Feat;
1976 uint32_t RandomData[7]; 2024 uint32_t RandomData[6];
1977} READ_REV_VAR; 2025} READ_REV_VAR;
1978 2026
1979/* Structure for MB Command READ_LINK_STAT (18) */ 2027/* Structure for MB Command READ_LINK_STAT (18) */
@@ -2013,6 +2061,14 @@ typedef struct {
2013 struct ulp_bde64 sp64; 2061 struct ulp_bde64 sp64;
2014 } un; 2062 } un;
2015 2063
2064#ifdef __BIG_ENDIAN_BITFIELD
2065 uint16_t rsvd6;
2066 uint16_t vpi;
2067#else /* __LITTLE_ENDIAN_BITFIELD */
2068 uint16_t vpi;
2069 uint16_t rsvd6;
2070#endif
2071
2016} REG_LOGIN_VAR; 2072} REG_LOGIN_VAR;
2017 2073
2018/* Word 30 contents for REG_LOGIN */ 2074/* Word 30 contents for REG_LOGIN */
@@ -2037,16 +2093,78 @@ typedef struct {
2037#ifdef __BIG_ENDIAN_BITFIELD 2093#ifdef __BIG_ENDIAN_BITFIELD
2038 uint16_t rsvd1; 2094 uint16_t rsvd1;
2039 uint16_t rpi; 2095 uint16_t rpi;
2096 uint32_t rsvd2;
2097 uint32_t rsvd3;
2098 uint32_t rsvd4;
2099 uint32_t rsvd5;
2100 uint16_t rsvd6;
2101 uint16_t vpi;
2040#else /* __LITTLE_ENDIAN_BITFIELD */ 2102#else /* __LITTLE_ENDIAN_BITFIELD */
2041 uint16_t rpi; 2103 uint16_t rpi;
2042 uint16_t rsvd1; 2104 uint16_t rsvd1;
2105 uint32_t rsvd2;
2106 uint32_t rsvd3;
2107 uint32_t rsvd4;
2108 uint32_t rsvd5;
2109 uint16_t vpi;
2110 uint16_t rsvd6;
2043#endif 2111#endif
2044} UNREG_LOGIN_VAR; 2112} UNREG_LOGIN_VAR;
2045 2113
2114/* Structure for MB Command REG_VPI (0x96) */
2115typedef struct {
2116#ifdef __BIG_ENDIAN_BITFIELD
2117 uint32_t rsvd1;
2118 uint32_t rsvd2:8;
2119 uint32_t sid:24;
2120 uint32_t rsvd3;
2121 uint32_t rsvd4;
2122 uint32_t rsvd5;
2123 uint16_t rsvd6;
2124 uint16_t vpi;
2125#else /* __LITTLE_ENDIAN */
2126 uint32_t rsvd1;
2127 uint32_t sid:24;
2128 uint32_t rsvd2:8;
2129 uint32_t rsvd3;
2130 uint32_t rsvd4;
2131 uint32_t rsvd5;
2132 uint16_t vpi;
2133 uint16_t rsvd6;
2134#endif
2135} REG_VPI_VAR;
2136
2137/* Structure for MB Command UNREG_VPI (0x97) */
2138typedef struct {
2139 uint32_t rsvd1;
2140 uint32_t rsvd2;
2141 uint32_t rsvd3;
2142 uint32_t rsvd4;
2143 uint32_t rsvd5;
2144#ifdef __BIG_ENDIAN_BITFIELD
2145 uint16_t rsvd6;
2146 uint16_t vpi;
2147#else /* __LITTLE_ENDIAN */
2148 uint16_t vpi;
2149 uint16_t rsvd6;
2150#endif
2151} UNREG_VPI_VAR;
2152
2046/* Structure for MB Command UNREG_D_ID (0x23) */ 2153/* Structure for MB Command UNREG_D_ID (0x23) */
2047 2154
2048typedef struct { 2155typedef struct {
2049 uint32_t did; 2156 uint32_t did;
2157 uint32_t rsvd2;
2158 uint32_t rsvd3;
2159 uint32_t rsvd4;
2160 uint32_t rsvd5;
2161#ifdef __BIG_ENDIAN_BITFIELD
2162 uint16_t rsvd6;
2163 uint16_t vpi;
2164#else
2165 uint16_t vpi;
2166 uint16_t rsvd6;
2167#endif
2050} UNREG_D_ID_VAR; 2168} UNREG_D_ID_VAR;
2051 2169
2052/* Structure for MB Command READ_LA (21) */ 2170/* Structure for MB Command READ_LA (21) */
@@ -2178,13 +2296,240 @@ typedef struct {
2178#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2296#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2179#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2297#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2180 2298
2181/* Structure for MB Command CONFIG_PORT (0x88) */ 2299struct hbq_mask {
2300#ifdef __BIG_ENDIAN_BITFIELD
2301 uint8_t tmatch;
2302 uint8_t tmask;
2303 uint8_t rctlmatch;
2304 uint8_t rctlmask;
2305#else /* __LITTLE_ENDIAN */
2306 uint8_t rctlmask;
2307 uint8_t rctlmatch;
2308 uint8_t tmask;
2309 uint8_t tmatch;
2310#endif
2311};
2312
2313
2314/* Structure for MB Command CONFIG_HBQ (7c) */
2315
2316struct config_hbq_var {
2317#ifdef __BIG_ENDIAN_BITFIELD
2318 uint32_t rsvd1 :7;
2319 uint32_t recvNotify :1; /* Receive Notification */
2320 uint32_t numMask :8; /* # Mask Entries */
2321 uint32_t profile :8; /* Selection Profile */
2322 uint32_t rsvd2 :8;
2323#else /* __LITTLE_ENDIAN */
2324 uint32_t rsvd2 :8;
2325 uint32_t profile :8; /* Selection Profile */
2326 uint32_t numMask :8; /* # Mask Entries */
2327 uint32_t recvNotify :1; /* Receive Notification */
2328 uint32_t rsvd1 :7;
2329#endif
2330
2331#ifdef __BIG_ENDIAN_BITFIELD
2332 uint32_t hbqId :16;
2333 uint32_t rsvd3 :12;
2334 uint32_t ringMask :4;
2335#else /* __LITTLE_ENDIAN */
2336 uint32_t ringMask :4;
2337 uint32_t rsvd3 :12;
2338 uint32_t hbqId :16;
2339#endif
2340
2341#ifdef __BIG_ENDIAN_BITFIELD
2342 uint32_t entry_count :16;
2343 uint32_t rsvd4 :8;
2344 uint32_t headerLen :8;
2345#else /* __LITTLE_ENDIAN */
2346 uint32_t headerLen :8;
2347 uint32_t rsvd4 :8;
2348 uint32_t entry_count :16;
2349#endif
2350
2351 uint32_t hbqaddrLow;
2352 uint32_t hbqaddrHigh;
2353
2354#ifdef __BIG_ENDIAN_BITFIELD
2355 uint32_t rsvd5 :31;
2356 uint32_t logEntry :1;
2357#else /* __LITTLE_ENDIAN */
2358 uint32_t logEntry :1;
2359 uint32_t rsvd5 :31;
2360#endif
2361
2362 uint32_t rsvd6; /* w7 */
2363 uint32_t rsvd7; /* w8 */
2364 uint32_t rsvd8; /* w9 */
2365
2366 struct hbq_mask hbqMasks[6];
2367
2368
2369 union {
2370 uint32_t allprofiles[12];
2371
2372 struct {
2373 #ifdef __BIG_ENDIAN_BITFIELD
2374 uint32_t seqlenoff :16;
2375 uint32_t maxlen :16;
2376 #else /* __LITTLE_ENDIAN */
2377 uint32_t maxlen :16;
2378 uint32_t seqlenoff :16;
2379 #endif
2380 #ifdef __BIG_ENDIAN_BITFIELD
2381 uint32_t rsvd1 :28;
2382 uint32_t seqlenbcnt :4;
2383 #else /* __LITTLE_ENDIAN */
2384 uint32_t seqlenbcnt :4;
2385 uint32_t rsvd1 :28;
2386 #endif
2387 uint32_t rsvd[10];
2388 } profile2;
2389
2390 struct {
2391 #ifdef __BIG_ENDIAN_BITFIELD
2392 uint32_t seqlenoff :16;
2393 uint32_t maxlen :16;
2394 #else /* __LITTLE_ENDIAN */
2395 uint32_t maxlen :16;
2396 uint32_t seqlenoff :16;
2397 #endif
2398 #ifdef __BIG_ENDIAN_BITFIELD
2399 uint32_t cmdcodeoff :28;
2400 uint32_t rsvd1 :12;
2401 uint32_t seqlenbcnt :4;
2402 #else /* __LITTLE_ENDIAN */
2403 uint32_t seqlenbcnt :4;
2404 uint32_t rsvd1 :12;
2405 uint32_t cmdcodeoff :28;
2406 #endif
2407 uint32_t cmdmatch[8];
2408
2409 uint32_t rsvd[2];
2410 } profile3;
2411
2412 struct {
2413 #ifdef __BIG_ENDIAN_BITFIELD
2414 uint32_t seqlenoff :16;
2415 uint32_t maxlen :16;
2416 #else /* __LITTLE_ENDIAN */
2417 uint32_t maxlen :16;
2418 uint32_t seqlenoff :16;
2419 #endif
2420 #ifdef __BIG_ENDIAN_BITFIELD
2421 uint32_t cmdcodeoff :28;
2422 uint32_t rsvd1 :12;
2423 uint32_t seqlenbcnt :4;
2424 #else /* __LITTLE_ENDIAN */
2425 uint32_t seqlenbcnt :4;
2426 uint32_t rsvd1 :12;
2427 uint32_t cmdcodeoff :28;
2428 #endif
2429 uint32_t cmdmatch[8];
2430
2431 uint32_t rsvd[2];
2432 } profile5;
2433
2434 } profiles;
2182 2435
2436};
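For orientation, a hedged sketch of how a caller might fill the config_hbq_var fields defined above for a simple receive queue. This is not the driver's actual CONFIG_HBQ routine (that lives in lpfc_sli.c and is not part of this hunk); the profile, the filter values and the use of the putPaddrLow/putPaddrHigh helpers to split the DMA address are assumptions used only for illustration:

/* Sketch only: populate a CONFIG_HBQ mailbox for an ELS-ring header
 * buffer queue using profile 0 (no header inspection) and one
 * R_CTL/TYPE filter.  The mailbox is assumed to be zeroed already. */
static void example_fill_config_hbq(MAILBOX_t *mb, uint16_t hbq_id,
				    uint16_t entry_count,
				    dma_addr_t hbq_phys)
{
	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;

	hbqmb->profile     = 0;			/* selection profile 0 */
	hbqmb->numMask     = 1;			/* one hbqMasks[] entry used */
	hbqmb->recvNotify  = 1;			/* notify host on receive */
	hbqmb->hbqId       = hbq_id;
	hbqmb->ringMask    = 1 << LPFC_ELS_RING;
	hbqmb->entry_count = entry_count;	/* number of HBQ entries */
	hbqmb->hbqaddrLow  = putPaddrLow(hbq_phys);
	hbqmb->hbqaddrHigh = putPaddrHigh(hbq_phys);

	/* Example filter: unsolicited ELS frames (R_CTL 0x22, TYPE 0x01) */
	hbqmb->hbqMasks[0].rctlmatch = 0x22;
	hbqmb->hbqMasks[0].rctlmask  = 0xff;
	hbqmb->hbqMasks[0].tmatch    = 0x01;
	hbqmb->hbqMasks[0].tmask     = 0xff;
}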
2437
2438
2439
2440/* Structure for MB Command CONFIG_PORT (0x88) */
2183typedef struct { 2441typedef struct {
2184 uint32_t pcbLen; 2442#ifdef __BIG_ENDIAN_BITFIELD
2443 uint32_t cBE : 1;
2444 uint32_t cET : 1;
2445 uint32_t cHpcb : 1;
2446 uint32_t cMA : 1;
2447 uint32_t sli_mode : 4;
2448 uint32_t pcbLen : 24; /* bit 23:0 of memory based port
2449 * config block */
2450#else /* __LITTLE_ENDIAN */
2451 uint32_t pcbLen : 24; /* bit 23:0 of memory based port
2452 * config block */
2453 uint32_t sli_mode : 4;
2454 uint32_t cMA : 1;
2455 uint32_t cHpcb : 1;
2456 uint32_t cET : 1;
2457 uint32_t cBE : 1;
2458#endif
2459
2185 uint32_t pcbLow; /* bit 31:0 of memory based port config block */ 2460 uint32_t pcbLow; /* bit 31:0 of memory based port config block */
2186 uint32_t pcbHigh; /* bit 63:32 of memory based port config block */ 2461 uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
2187 uint32_t hbainit[5]; 2462 uint32_t hbainit[6];
2463
2464#ifdef __BIG_ENDIAN_BITFIELD
2465 uint32_t rsvd : 24; /* Reserved */
2466 uint32_t cmv : 1; /* Configure Max VPIs */
2467 uint32_t ccrp : 1; /* Config Command Ring Polling */
2468 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
2469 uint32_t chbs : 1; /* Configure Host Backing store */
2470 uint32_t cinb : 1; /* Enable Interrupt Notification Block */
2471 uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
2472 uint32_t cmx : 1; /* Configure Max XRIs */
2473 uint32_t cmr : 1; /* Configure Max RPIs */
2474#else /* __LITTLE_ENDIAN */
2475 uint32_t cmr : 1; /* Configure Max RPIs */
2476 uint32_t cmx : 1; /* Configure Max XRIs */
2477 uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
2478 uint32_t cinb : 1; /* Enable Interrupt Notification Block */
2479 uint32_t chbs : 1; /* Configure Host Backing store */
2480 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
2481 uint32_t ccrp : 1; /* Config Command Ring Polling */
2482 uint32_t cmv : 1; /* Configure Max VPIs */
2483 uint32_t rsvd : 24; /* Reserved */
2484#endif
2485#ifdef __BIG_ENDIAN_BITFIELD
2486 uint32_t rsvd2 : 24; /* Reserved */
2487 uint32_t gmv : 1; /* Grant Max VPIs */
2488 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2489 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
2490 uint32_t ghbs : 1; /* Grant Host Backing Store */
2491 uint32_t ginb : 1; /* Grant Interrupt Notification Block */
2492 uint32_t gerbm : 1; /* Grant ERBM Request */
2493 uint32_t gmx : 1; /* Grant Max XRIs */
2494 uint32_t gmr : 1; /* Grant Max RPIs */
2495#else /* __LITTLE_ENDIAN */
2496 uint32_t gmr : 1; /* Grant Max RPIs */
2497 uint32_t gmx : 1; /* Grant Max XRIs */
2498 uint32_t gerbm : 1; /* Grant ERBM Request */
2499 uint32_t ginb : 1; /* Grant Interrupt Notification Block */
2500 uint32_t ghbs : 1; /* Grant Host Backing Store */
2501 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
2502 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2503 uint32_t gmv : 1; /* Grant Max VPIs */
2504 uint32_t rsvd2 : 24; /* Reserved */
2505#endif
2506
2507#ifdef __BIG_ENDIAN_BITFIELD
2508 uint32_t max_rpi : 16; /* Max RPIs Port should configure */
2509 uint32_t max_xri : 16; /* Max XRIs Port should configure */
2510#else /* __LITTLE_ENDIAN */
2511 uint32_t max_xri : 16; /* Max XRIs Port should configure */
2512 uint32_t max_rpi : 16; /* Max RPIs Port should configure */
2513#endif
2514
2515#ifdef __BIG_ENDIAN_BITFIELD
2516 uint32_t max_hbq : 16; /* Max HBQs Host expects to configure */
2517 uint32_t rsvd3 : 16; /* Reserved */
2518#else /* __LITTLE_ENDIAN */
2519 uint32_t rsvd3 : 16; /* Reserved */
2520 uint32_t max_hbq : 16; /* Max HBQs Host expects to configure */
2521#endif
2522
2523 uint32_t rsvd4; /* Reserved */
2524
2525#ifdef __BIG_ENDIAN_BITFIELD
2526 uint32_t rsvd5 : 16; /* Reserved */
2527 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2528#else /* __LITTLE_ENDIAN */
2529 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2530 uint32_t rsvd5 : 16; /* Reserved */
2531#endif
2532
2188} CONFIG_PORT_VAR; 2533} CONFIG_PORT_VAR;
2189 2534
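CONFIG_PORT now works as a request/grant negotiation: the host sets the c* bits (and sli_mode) for the features it wants, and on completion the firmware answers with the matching g* bits plus the max_xri/max_rpi/max_vpi limits it will actually honour. A condensed sketch of that flow, assuming the mailbox has already been prepared by the usual lpfc_config_port() path; the real logic, with full error handling and the remaining feature bits, lives in lpfc_sli.c and is not shown in this patch hunk:

/* Sketch only: request SLI-3 with HBQs and NPIV, then honour what the
 * firmware granted.  Names other than the CONFIG_PORT_VAR fields above
 * follow the driver's existing conventions but are illustrative here. */
static int example_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	CONFIG_PORT_VAR *cp = &pmb->mb.un.varCfgPort;

	cp->sli_mode = 3;
	cp->cerbm = 1;		/* ask for HBQ receive buffer management */
	cp->cmv   = 1;		/* ask for multiple VPIs (NPIV) */

	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS)
		return -EIO;

	if (cp->gerbm)		/* grant bits mirror the request bits */
		phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
	if (cp->gmv)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"%d:CONFIG_PORT granted %d VPIs\n",
				phba->brd_no, cp->max_vpi);
	return 0;
}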
2190/* SLI-2 Port Control Block */ 2535/* SLI-2 Port Control Block */
@@ -2262,33 +2607,40 @@ typedef struct {
2262#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t)) 2607#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
2263 2608
2264typedef union { 2609typedef union {
2265 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; 2610 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
2266 LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */ 2611 * feature/max ring number
2267 READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */ 2612 */
2268 WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */ 2613 LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
2269 BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */ 2614 READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
2270 INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */ 2615 WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
2616 BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
2617 INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
2271 DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */ 2618 DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
2272 CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */ 2619 CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
2273 PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */ 2620 PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
2274 CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */ 2621 CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */
2275 RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */ 2622 RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */
2276 READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */ 2623 READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */
2277 READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */ 2624 READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */
2278 READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */ 2625 READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */
2279 READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */ 2626 READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */
2280 READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */ 2627 READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
2281 READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */ 2628 READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
2282 READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */ 2629 READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
2283 READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */ 2630 READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
2284 REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */ 2631 REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */
2285 UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */ 2632 UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */
2286 READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */ 2633 READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
2287 CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */ 2634 CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */
2288 DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */ 2635 DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
2289 UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */ 2636 UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
2290 CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) NEW_FEATURE */ 2637 CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP)
2291 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ 2638 * NEW_FEATURE
2639 */
2640 struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
2641 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
2642 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
2643 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
2292} MAILVARIANTS; 2644} MAILVARIANTS;
2293 2645
2294/* 2646/*
@@ -2305,14 +2657,27 @@ struct lpfc_pgp {
2305 __le32 rspPutInx; 2657 __le32 rspPutInx;
2306}; 2658};
2307 2659
2308typedef struct _SLI2_DESC { 2660struct sli2_desc {
2309 struct lpfc_hgp host[MAX_RINGS];
2310 uint32_t unused1[16]; 2661 uint32_t unused1[16];
2662 struct lpfc_hgp host[MAX_RINGS];
2663 struct lpfc_pgp port[MAX_RINGS];
2664};
2665
2666struct sli3_desc {
2667 struct lpfc_hgp host[MAX_RINGS];
2668 uint32_t reserved[8];
2669 uint32_t hbq_put[16];
2670};
2671
2672struct sli3_pgp {
2311 struct lpfc_pgp port[MAX_RINGS]; 2673 struct lpfc_pgp port[MAX_RINGS];
2312} SLI2_DESC; 2674 uint32_t hbq_get[16];
2675};
2313 2676
2314typedef union { 2677typedef union {
2315 SLI2_DESC s2; 2678 struct sli2_desc s2;
2679 struct sli3_desc s3;
2680 struct sli3_pgp s3_pgp;
2316} SLI_VAR; 2681} SLI_VAR;
2317 2682
2318typedef struct { 2683typedef struct {
@@ -2618,6 +2983,25 @@ typedef struct {
2618 uint32_t fcpt_Length; /* transfer ready for IWRITE */ 2983 uint32_t fcpt_Length; /* transfer ready for IWRITE */
2619} FCPT_FIELDS64; 2984} FCPT_FIELDS64;
2620 2985
2986/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
2987 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
2988
2989struct rcv_sli3 {
2990 uint32_t word8Rsvd;
2991#ifdef __BIG_ENDIAN_BITFIELD
2992 uint16_t vpi;
2993 uint16_t word9Rsvd;
2994#else /* __LITTLE_ENDIAN */
2995 uint16_t word9Rsvd;
2996 uint16_t vpi;
2997#endif
2998 uint32_t word10Rsvd;
2999 uint32_t acc_len; /* accumulated length */
3000 struct ulp_bde64 bde2;
3001};
3002
3003
3004
2621typedef struct _IOCB { /* IOCB structure */ 3005typedef struct _IOCB { /* IOCB structure */
2622 union { 3006 union {
2623 GENERIC_RSP grsp; /* Generic response */ 3007 GENERIC_RSP grsp; /* Generic response */
@@ -2632,8 +3016,8 @@ typedef struct _IOCB { /* IOCB structure */
2632 3016
2633 /* SLI-2 structures */ 3017 /* SLI-2 structures */
2634 3018
2635 struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation 3019 struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
2636 bde_64s */ 3020 * bde_64s */
2637 ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */ 3021 ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */
2638 GEN_REQUEST64 genreq64; /* GEN_REQUEST template */ 3022 GEN_REQUEST64 genreq64; /* GEN_REQUEST template */
2639 RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */ 3023 RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */
@@ -2695,9 +3079,20 @@ typedef struct _IOCB { /* IOCB structure */
2695 uint32_t ulpTimeout:8; 3079 uint32_t ulpTimeout:8;
2696#endif 3080#endif
2697 3081
3082 union {
3083 struct rcv_sli3 rcvsli3; /* words 8 - 15 */
3084 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
3085 } unsli3;
3086
3087#define ulpCt_h ulpXS
3088#define ulpCt_l ulpFCP2Rcvy
3089
3090#define IOCB_FCP 1 /* IOCB is used for FCP ELS cmds-ulpRsvByte */
3091#define IOCB_IP 2 /* IOCB is used for IP ELS cmds */
2698#define PARM_UNUSED 0 /* PU field (Word 4) not used */ 3092#define PARM_UNUSED 0 /* PU field (Word 4) not used */
2699#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */ 3093#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
2700#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */ 3094#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
3095#define PARM_NPIV_DID 3
2701#define CLASS1 0 /* Class 1 */ 3096#define CLASS1 0 /* Class 1 */
2702#define CLASS2 1 /* Class 2 */ 3097#define CLASS2 1 /* Class 2 */
2703#define CLASS3 2 /* Class 3 */ 3098#define CLASS3 2 /* Class 3 */
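The unsli3 words matter for NPIV because every unsolicited SLI-3 receive IOCB now carries the vpi of the virtual port the frame arrived on. A hedged sketch of how a receive path could map that back to a vport; the lookup helper is hypothetical (it assumes each lpfc_vport records its vpi), and only the field access mirrors the structures above:

/* Sketch only: resolve the destination vport of an unsolicited SLI-3
 * ELS/sequence IOCB from the vpi carried in IOCB words 8-15. */
static struct lpfc_vport *
example_vport_from_rcv_iocb(struct lpfc_hba *phba, IOCB_t *icmd)
{
	struct lpfc_vport *vport;
	uint16_t vpi = icmd->unsli3.rcvsli3.vpi;

	list_for_each_entry(vport, &phba->port_list, listentry)
		if (vport->vpi == vpi)
			return vport;

	return NULL;	/* no such vport; the frame would be dropped */
}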
@@ -2718,39 +3113,51 @@ typedef struct _IOCB { /* IOCB structure */
2718#define IOSTAT_RSVD2 0xC 3113#define IOSTAT_RSVD2 0xC
2719#define IOSTAT_RSVD3 0xD 3114#define IOSTAT_RSVD3 0xD
2720#define IOSTAT_RSVD4 0xE 3115#define IOSTAT_RSVD4 0xE
2721#define IOSTAT_RSVD5 0xF 3116#define IOSTAT_NEED_BUFFER 0xF
2722#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */ 3117#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
2723#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */ 3118#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
2724#define IOSTAT_CNT 0x11 3119#define IOSTAT_CNT 0x11
2725 3120
2726} IOCB_t; 3121} IOCB_t;
2727 3122
3123/* Structure used for a single HBQ entry */
3124struct lpfc_hbq_entry {
3125 struct ulp_bde64 bde;
3126 uint32_t buffer_tag;
3127};
3128
2728 3129
2729#define SLI1_SLIM_SIZE (4 * 1024) 3130#define SLI1_SLIM_SIZE (4 * 1024)
2730 3131
2731/* Up to 498 IOCBs will fit into 16k 3132/* Up to 498 IOCBs will fit into 16k
2732 * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384 3133 * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
2733 */ 3134 */
2734#define SLI2_SLIM_SIZE (16 * 1024) 3135#define SLI2_SLIM_SIZE (64 * 1024)
2735 3136
2736/* Maximum IOCBs that will fit in SLI2 slim */ 3137/* Maximum IOCBs that will fit in SLI2 slim */
2737#define MAX_SLI2_IOCB 498 3138#define MAX_SLI2_IOCB 498
3139#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
3140 (sizeof(MAILBOX_t) + sizeof(PCB_t)))
3141
3142/* HBQ entries are 4 words each = 4k */
3143#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
3144 lpfc_sli_hbq_count())
2738 3145
2739struct lpfc_sli2_slim { 3146struct lpfc_sli2_slim {
2740 MAILBOX_t mbx; 3147 MAILBOX_t mbx;
2741 PCB_t pcb; 3148 PCB_t pcb;
2742 IOCB_t IOCBs[MAX_SLI2_IOCB]; 3149 IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
2743}; 3150};
2744 3151
2745/******************************************************************* 3152/*
2746This macro check PCI device to allow special handling for LC HBAs. 3153 * This function checks PCI device to allow special handling for LC HBAs.
2747 3154 *
2748Parameters: 3155 * Parameters:
2749device : struct pci_dev 's device field 3156 * device : struct pci_dev 's device field
2750 3157 *
2751return 1 => TRUE 3158 * return 1 => TRUE
2752 0 => FALSE 3159 * 0 => FALSE
2753 *******************************************************************/ 3160 */
2754static inline int 3161static inline int
2755lpfc_is_LC_HBA(unsigned short device) 3162lpfc_is_LC_HBA(unsigned short device)
2756{ 3163{
@@ -2766,3 +3173,16 @@ lpfc_is_LC_HBA(unsigned short device)
2766 else 3173 else
2767 return 0; 3174 return 0;
2768} 3175}
3176
3177/*
3178 * Determine if an IOCB failed because of a link event or firmware reset.
3179 */
3180
3181static inline int
3182lpfc_error_lost_link(IOCB_t *iocbp)
3183{
3184 return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
3185 (iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
3186 iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
3187 iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
3188}
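Typical use of the new helper, sketched as a hypothetical completion handler (the helper and the IOSTAT/IOERR codes are the ones defined above; the handler itself is illustrative): when a command failed only because the link or the SLI layer went away, retry accounting is pointless, so callers bail out early.

static void example_els_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmdiocb,
			     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	if (irsp->ulpStatus) {
		if (lpfc_error_lost_link(irsp))
			return;		/* link gone; do not retry */
		/* otherwise normal retry / error handling goes here */
	}
}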
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 955b2e48d041..f81f85ee190f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -27,6 +27,7 @@
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/ctype.h>
30 31
31#include <scsi/scsi.h> 32#include <scsi/scsi.h>
32#include <scsi/scsi_device.h> 33#include <scsi/scsi_device.h>
@@ -40,15 +41,20 @@
40#include "lpfc.h" 41#include "lpfc.h"
41#include "lpfc_logmsg.h" 42#include "lpfc_logmsg.h"
42#include "lpfc_crtn.h" 43#include "lpfc_crtn.h"
44#include "lpfc_vport.h"
43#include "lpfc_version.h" 45#include "lpfc_version.h"
46#include "lpfc_vport.h"
44 47
45static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 48static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
46static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 49static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
47static int lpfc_post_rcv_buf(struct lpfc_hba *); 50static int lpfc_post_rcv_buf(struct lpfc_hba *);
48 51
49static struct scsi_transport_template *lpfc_transport_template = NULL; 52static struct scsi_transport_template *lpfc_transport_template = NULL;
53static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
50static DEFINE_IDR(lpfc_hba_index); 54static DEFINE_IDR(lpfc_hba_index);
51 55
56
57
52/************************************************************************/ 58/************************************************************************/
53/* */ 59/* */
54/* lpfc_config_port_prep */ 60/* lpfc_config_port_prep */
@@ -61,7 +67,7 @@ static DEFINE_IDR(lpfc_hba_index);
61/* */ 67/* */
62/************************************************************************/ 68/************************************************************************/
63int 69int
64lpfc_config_port_prep(struct lpfc_hba * phba) 70lpfc_config_port_prep(struct lpfc_hba *phba)
65{ 71{
66 lpfc_vpd_t *vp = &phba->vpd; 72 lpfc_vpd_t *vp = &phba->vpd;
67 int i = 0, rc; 73 int i = 0, rc;
@@ -75,12 +81,12 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
75 81
76 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 82 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
77 if (!pmb) { 83 if (!pmb) {
78 phba->hba_state = LPFC_HBA_ERROR; 84 phba->link_state = LPFC_HBA_ERROR;
79 return -ENOMEM; 85 return -ENOMEM;
80 } 86 }
81 87
82 mb = &pmb->mb; 88 mb = &pmb->mb;
83 phba->hba_state = LPFC_INIT_MBX_CMDS; 89 phba->link_state = LPFC_INIT_MBX_CMDS;
84 90
85 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 91 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
86 if (init_key) { 92 if (init_key) {
@@ -100,9 +106,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
100 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 106 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
101 107
102 if (rc != MBX_SUCCESS) { 108 if (rc != MBX_SUCCESS) {
103 lpfc_printf_log(phba, 109 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
104 KERN_ERR,
105 LOG_MBOX,
106 "%d:0324 Config Port initialization " 110 "%d:0324 Config Port initialization "
107 "error, mbxCmd x%x READ_NVPARM, " 111 "error, mbxCmd x%x READ_NVPARM, "
108 "mbxStatus x%x\n", 112 "mbxStatus x%x\n",
@@ -112,16 +116,18 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
112 return -ERESTART; 116 return -ERESTART;
113 } 117 }
114 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 118 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
115 sizeof (mb->un.varRDnvp.nodename)); 119 sizeof(phba->wwnn));
120 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
121 sizeof(phba->wwpn));
116 } 122 }
117 123
124 phba->sli3_options = 0x0;
125
118 /* Setup and issue mailbox READ REV command */ 126 /* Setup and issue mailbox READ REV command */
119 lpfc_read_rev(phba, pmb); 127 lpfc_read_rev(phba, pmb);
120 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 128 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
121 if (rc != MBX_SUCCESS) { 129 if (rc != MBX_SUCCESS) {
122 lpfc_printf_log(phba, 130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
123 KERN_ERR,
124 LOG_INIT,
125 "%d:0439 Adapter failed to init, mbxCmd x%x " 131 "%d:0439 Adapter failed to init, mbxCmd x%x "
126 "READ_REV, mbxStatus x%x\n", 132 "READ_REV, mbxStatus x%x\n",
127 phba->brd_no, 133 phba->brd_no,
@@ -130,6 +136,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
130 return -ERESTART; 136 return -ERESTART;
131 } 137 }
132 138
139
133 /* 140 /*
134 * The value of rr must be 1 since the driver set the cv field to 1. 141 * The value of rr must be 1 since the driver set the cv field to 1.
135 * This setting requires the FW to set all revision fields. 142 * This setting requires the FW to set all revision fields.
@@ -144,8 +151,12 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
144 return -ERESTART; 151 return -ERESTART;
145 } 152 }
146 153
154 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp)
155 return -EINVAL;
156
147 /* Save information as VPD data */ 157 /* Save information as VPD data */
148 vp->rev.rBit = 1; 158 vp->rev.rBit = 1;
159 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
149 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 160 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
150 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 161 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
151 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 162 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
@@ -161,6 +172,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
161 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 172 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
162 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 173 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
163 174
175 /* If the sli feature level is less than 9, we must
176 * tear down all RPIs and VPIs on link down if NPIV
177 * is enabled.
178 */
179 if (vp->rev.feaLevelHigh < 9)
180 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
181
164 if (lpfc_is_LC_HBA(phba->pcidev->device)) 182 if (lpfc_is_LC_HBA(phba->pcidev->device))
165 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 183 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
166 sizeof (phba->RandomData)); 184 sizeof (phba->RandomData));
@@ -188,7 +206,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
188 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 206 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
189 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 207 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
190 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, 208 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
191 mb->un.varDmp.word_cnt); 209 mb->un.varDmp.word_cnt);
192 offset += mb->un.varDmp.word_cnt; 210 offset += mb->un.varDmp.word_cnt;
193 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 211 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
194 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 212 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
@@ -212,48 +230,34 @@ out_free_mbox:
212/* */ 230/* */
213/************************************************************************/ 231/************************************************************************/
214int 232int
215lpfc_config_port_post(struct lpfc_hba * phba) 233lpfc_config_port_post(struct lpfc_hba *phba)
216{ 234{
235 struct lpfc_vport *vport = phba->pport;
217 LPFC_MBOXQ_t *pmb; 236 LPFC_MBOXQ_t *pmb;
218 MAILBOX_t *mb; 237 MAILBOX_t *mb;
219 struct lpfc_dmabuf *mp; 238 struct lpfc_dmabuf *mp;
220 struct lpfc_sli *psli = &phba->sli; 239 struct lpfc_sli *psli = &phba->sli;
221 uint32_t status, timeout; 240 uint32_t status, timeout;
222 int i, j, rc; 241 int i, j;
242 int rc;
223 243
224 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 244 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
225 if (!pmb) { 245 if (!pmb) {
226 phba->hba_state = LPFC_HBA_ERROR; 246 phba->link_state = LPFC_HBA_ERROR;
227 return -ENOMEM; 247 return -ENOMEM;
228 } 248 }
229 mb = &pmb->mb; 249 mb = &pmb->mb;
230 250
231 lpfc_config_link(phba, pmb);
232 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
233 if (rc != MBX_SUCCESS) {
234 lpfc_printf_log(phba,
235 KERN_ERR,
236 LOG_INIT,
237 "%d:0447 Adapter failed init, mbxCmd x%x "
238 "CONFIG_LINK mbxStatus x%x\n",
239 phba->brd_no,
240 mb->mbxCommand, mb->mbxStatus);
241 phba->hba_state = LPFC_HBA_ERROR;
242 mempool_free( pmb, phba->mbox_mem_pool);
243 return -EIO;
244 }
245
246 /* Get login parameters for NID. */ 251 /* Get login parameters for NID. */
247 lpfc_read_sparam(phba, pmb); 252 lpfc_read_sparam(phba, pmb, 0);
253 pmb->vport = vport;
248 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 254 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
249 lpfc_printf_log(phba, 255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
250 KERN_ERR,
251 LOG_INIT,
252 "%d:0448 Adapter failed init, mbxCmd x%x " 256 "%d:0448 Adapter failed init, mbxCmd x%x "
253 "READ_SPARM mbxStatus x%x\n", 257 "READ_SPARM mbxStatus x%x\n",
254 phba->brd_no, 258 phba->brd_no,
255 mb->mbxCommand, mb->mbxStatus); 259 mb->mbxCommand, mb->mbxStatus);
256 phba->hba_state = LPFC_HBA_ERROR; 260 phba->link_state = LPFC_HBA_ERROR;
257 mp = (struct lpfc_dmabuf *) pmb->context1; 261 mp = (struct lpfc_dmabuf *) pmb->context1;
258 mempool_free( pmb, phba->mbox_mem_pool); 262 mempool_free( pmb, phba->mbox_mem_pool);
259 lpfc_mbuf_free(phba, mp->virt, mp->phys); 263 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -263,25 +267,27 @@ lpfc_config_port_post(struct lpfc_hba * phba)
263 267
264 mp = (struct lpfc_dmabuf *) pmb->context1; 268 mp = (struct lpfc_dmabuf *) pmb->context1;
265 269
266 memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm)); 270 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
267 lpfc_mbuf_free(phba, mp->virt, mp->phys); 271 lpfc_mbuf_free(phba, mp->virt, mp->phys);
268 kfree(mp); 272 kfree(mp);
269 pmb->context1 = NULL; 273 pmb->context1 = NULL;
270 274
271 if (phba->cfg_soft_wwnn) 275 if (phba->cfg_soft_wwnn)
272 u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn); 276 u64_to_wwn(phba->cfg_soft_wwnn,
277 vport->fc_sparam.nodeName.u.wwn);
273 if (phba->cfg_soft_wwpn) 278 if (phba->cfg_soft_wwpn)
274 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); 279 u64_to_wwn(phba->cfg_soft_wwpn,
275 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, 280 vport->fc_sparam.portName.u.wwn);
281 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
276 sizeof (struct lpfc_name)); 282 sizeof (struct lpfc_name));
277 memcpy(&phba->fc_portname, &phba->fc_sparam.portName, 283 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
278 sizeof (struct lpfc_name)); 284 sizeof (struct lpfc_name));
279 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 285 /* If no serial number in VPD data, use low 6 bytes of WWNN */
280 /* This should be consolidated into parse_vpd ? - mr */ 286 /* This should be consolidated into parse_vpd ? - mr */
281 if (phba->SerialNumber[0] == 0) { 287 if (phba->SerialNumber[0] == 0) {
282 uint8_t *outptr; 288 uint8_t *outptr;
283 289
284 outptr = &phba->fc_nodename.u.s.IEEE[0]; 290 outptr = &vport->fc_nodename.u.s.IEEE[0];
285 for (i = 0; i < 12; i++) { 291 for (i = 0; i < 12; i++) {
286 status = *outptr++; 292 status = *outptr++;
287 j = ((status & 0xf0) >> 4); 293 j = ((status & 0xf0) >> 4);
@@ -303,15 +309,14 @@ lpfc_config_port_post(struct lpfc_hba * phba)
303 } 309 }
304 310
305 lpfc_read_config(phba, pmb); 311 lpfc_read_config(phba, pmb);
312 pmb->vport = vport;
306 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 313 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
307 lpfc_printf_log(phba, 314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
308 KERN_ERR,
309 LOG_INIT,
310 "%d:0453 Adapter failed to init, mbxCmd x%x " 315 "%d:0453 Adapter failed to init, mbxCmd x%x "
311 "READ_CONFIG, mbxStatus x%x\n", 316 "READ_CONFIG, mbxStatus x%x\n",
312 phba->brd_no, 317 phba->brd_no,
313 mb->mbxCommand, mb->mbxStatus); 318 mb->mbxCommand, mb->mbxStatus);
314 phba->hba_state = LPFC_HBA_ERROR; 319 phba->link_state = LPFC_HBA_ERROR;
315 mempool_free( pmb, phba->mbox_mem_pool); 320 mempool_free( pmb, phba->mbox_mem_pool);
316 return -EIO; 321 return -EIO;
317 } 322 }
@@ -338,9 +343,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
338 || ((phba->cfg_link_speed == LINK_SPEED_10G) 343 || ((phba->cfg_link_speed == LINK_SPEED_10G)
339 && !(phba->lmt & LMT_10Gb))) { 344 && !(phba->lmt & LMT_10Gb))) {
340 /* Reset link speed to auto */ 345 /* Reset link speed to auto */
341 lpfc_printf_log(phba, 346 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
342 KERN_WARNING,
343 LOG_LINK_EVENT,
344 "%d:1302 Invalid speed for this board: " 347 "%d:1302 Invalid speed for this board: "
345 "Reset link speed to auto: x%x\n", 348 "Reset link speed to auto: x%x\n",
346 phba->brd_no, 349 phba->brd_no,
@@ -348,7 +351,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
348 phba->cfg_link_speed = LINK_SPEED_AUTO; 351 phba->cfg_link_speed = LINK_SPEED_AUTO;
349 } 352 }
350 353
351 phba->hba_state = LPFC_LINK_DOWN; 354 phba->link_state = LPFC_LINK_DOWN;
352 355
353 /* Only process IOCBs on ring 0 till hba_state is READY */ 356 /* Only process IOCBs on ring 0 till hba_state is READY */
354 if (psli->ring[psli->extra_ring].cmdringaddr) 357 if (psli->ring[psli->extra_ring].cmdringaddr)
@@ -359,10 +362,11 @@ lpfc_config_port_post(struct lpfc_hba * phba)
359 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 362 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
360 363
361 /* Post receive buffers for desired rings */ 364 /* Post receive buffers for desired rings */
362 lpfc_post_rcv_buf(phba); 365 if (phba->sli_rev != 3)
366 lpfc_post_rcv_buf(phba);
363 367
364 /* Enable appropriate host interrupts */ 368 /* Enable appropriate host interrupts */
365 spin_lock_irq(phba->host->host_lock); 369 spin_lock_irq(&phba->hbalock);
366 status = readl(phba->HCregaddr); 370 status = readl(phba->HCregaddr);
367 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 371 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
368 if (psli->num_rings > 0) 372 if (psli->num_rings > 0)
@@ -380,22 +384,24 @@ lpfc_config_port_post(struct lpfc_hba * phba)
380 384
381 writel(status, phba->HCregaddr); 385 writel(status, phba->HCregaddr);
382 readl(phba->HCregaddr); /* flush */ 386 readl(phba->HCregaddr); /* flush */
383 spin_unlock_irq(phba->host->host_lock); 387 spin_unlock_irq(&phba->hbalock);
384 388
385 /* 389 /*
386 * Setup the ring 0 (els) timeout handler 390 * Setup the ring 0 (els) timeout handler
387 */ 391 */
388 timeout = phba->fc_ratov << 1; 392 timeout = phba->fc_ratov << 1;
389 mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout); 393 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
394 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
395 phba->hb_outstanding = 0;
396 phba->last_completion_time = jiffies;
390 397
391 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 398 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
392 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 399 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
400 pmb->vport = vport;
393 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 401 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
394 lpfc_set_loopback_flag(phba); 402 lpfc_set_loopback_flag(phba);
395 if (rc != MBX_SUCCESS) { 403 if (rc != MBX_SUCCESS) {
396 lpfc_printf_log(phba, 404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
397 KERN_ERR,
398 LOG_INIT,
399 "%d:0454 Adapter failed to init, mbxCmd x%x " 405 "%d:0454 Adapter failed to init, mbxCmd x%x "
400 "INIT_LINK, mbxStatus x%x\n", 406 "INIT_LINK, mbxStatus x%x\n",
401 phba->brd_no, 407 phba->brd_no,
@@ -408,7 +414,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
408 writel(0xffffffff, phba->HAregaddr); 414 writel(0xffffffff, phba->HAregaddr);
409 readl(phba->HAregaddr); /* flush */ 415 readl(phba->HAregaddr); /* flush */
410 416
411 phba->hba_state = LPFC_HBA_ERROR; 417 phba->link_state = LPFC_HBA_ERROR;
412 if (rc != MBX_BUSY) 418 if (rc != MBX_BUSY)
413 mempool_free(pmb, phba->mbox_mem_pool); 419 mempool_free(pmb, phba->mbox_mem_pool);
414 return -EIO; 420 return -EIO;
@@ -429,18 +435,19 @@ lpfc_config_port_post(struct lpfc_hba * phba)
429/* */ 435/* */
430/************************************************************************/ 436/************************************************************************/
431int 437int
432lpfc_hba_down_prep(struct lpfc_hba * phba) 438lpfc_hba_down_prep(struct lpfc_hba *phba)
433{ 439{
440 struct lpfc_vport *vport = phba->pport;
441
434 /* Disable interrupts */ 442 /* Disable interrupts */
435 writel(0, phba->HCregaddr); 443 writel(0, phba->HCregaddr);
436 readl(phba->HCregaddr); /* flush */ 444 readl(phba->HCregaddr); /* flush */
437 445
438 /* Cleanup potential discovery resources */ 446 list_for_each_entry(vport, &phba->port_list, listentry) {
439 lpfc_els_flush_rscn(phba); 447 lpfc_cleanup_discovery_resources(vport);
440 lpfc_els_flush_cmd(phba); 448 }
441 lpfc_disc_flush_list(phba);
442 449
443 return (0); 450 return 0;
444} 451}
445 452
446/************************************************************************/ 453/************************************************************************/
@@ -453,20 +460,24 @@ lpfc_hba_down_prep(struct lpfc_hba * phba)
453/* */ 460/* */
454/************************************************************************/ 461/************************************************************************/
455int 462int
456lpfc_hba_down_post(struct lpfc_hba * phba) 463lpfc_hba_down_post(struct lpfc_hba *phba)
457{ 464{
458 struct lpfc_sli *psli = &phba->sli; 465 struct lpfc_sli *psli = &phba->sli;
459 struct lpfc_sli_ring *pring; 466 struct lpfc_sli_ring *pring;
460 struct lpfc_dmabuf *mp, *next_mp; 467 struct lpfc_dmabuf *mp, *next_mp;
461 int i; 468 int i;
462 469
463 /* Cleanup preposted buffers on the ELS ring */ 470 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
464 pring = &psli->ring[LPFC_ELS_RING]; 471 lpfc_sli_hbqbuf_free_all(phba);
465 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 472 else {
466 list_del(&mp->list); 473 /* Cleanup preposted buffers on the ELS ring */
467 pring->postbufq_cnt--; 474 pring = &psli->ring[LPFC_ELS_RING];
468 lpfc_mbuf_free(phba, mp->virt, mp->phys); 475 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
469 kfree(mp); 476 list_del(&mp->list);
477 pring->postbufq_cnt--;
478 lpfc_mbuf_free(phba, mp->virt, mp->phys);
479 kfree(mp);
480 }
470 } 481 }
471 482
472 for (i = 0; i < psli->num_rings; i++) { 483 for (i = 0; i < psli->num_rings; i++) {
@@ -477,6 +488,119 @@ lpfc_hba_down_post(struct lpfc_hba * phba)
477 return 0; 488 return 0;
478} 489}
479 490
491/* HBA heart beat timeout handler */
492void
493lpfc_hb_timeout(unsigned long ptr)
494{
495 struct lpfc_hba *phba;
496 unsigned long iflag;
497
498 phba = (struct lpfc_hba *)ptr;
499 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
500 if (!(phba->pport->work_port_events & WORKER_HB_TMO))
501 phba->pport->work_port_events |= WORKER_HB_TMO;
502 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
503
504 if (phba->work_wait)
505 wake_up(phba->work_wait);
506 return;
507}
508
509static void
510lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
511{
512 unsigned long drvr_flag;
513
514 spin_lock_irqsave(&phba->hbalock, drvr_flag);
515 phba->hb_outstanding = 0;
516 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
517
518 mempool_free(pmboxq, phba->mbox_mem_pool);
519 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
520 !(phba->link_state == LPFC_HBA_ERROR) &&
521 !(phba->pport->fc_flag & FC_UNLOADING))
522 mod_timer(&phba->hb_tmofunc,
523 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
524 return;
525}
526
527void
528lpfc_hb_timeout_handler(struct lpfc_hba *phba)
529{
530 LPFC_MBOXQ_t *pmboxq;
531 int retval;
532 struct lpfc_sli *psli = &phba->sli;
533
534 if ((phba->link_state == LPFC_HBA_ERROR) ||
535 (phba->pport->fc_flag & FC_UNLOADING) ||
536 (phba->pport->fc_flag & FC_OFFLINE_MODE))
537 return;
538
539 spin_lock_irq(&phba->pport->work_port_lock);
540 /* If the timer is already canceled do nothing */
541 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
542 spin_unlock_irq(&phba->pport->work_port_lock);
543 return;
544 }
545
546 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
547 jiffies)) {
548 spin_unlock_irq(&phba->pport->work_port_lock);
549 if (!phba->hb_outstanding)
550 mod_timer(&phba->hb_tmofunc,
551 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
552 else
553 mod_timer(&phba->hb_tmofunc,
554 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
555 return;
556 }
557 spin_unlock_irq(&phba->pport->work_port_lock);
558
559 /* If there is no heart beat outstanding, issue a heartbeat command */
560 if (!phba->hb_outstanding) {
561 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
562 if (!pmboxq) {
563 mod_timer(&phba->hb_tmofunc,
564 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
565 return;
566 }
567
568 lpfc_heart_beat(phba, pmboxq);
569 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
570 pmboxq->vport = phba->pport;
571 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
572
573 if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
574 mempool_free(pmboxq, phba->mbox_mem_pool);
575 mod_timer(&phba->hb_tmofunc,
576 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
577 return;
578 }
579 mod_timer(&phba->hb_tmofunc,
580 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
581 phba->hb_outstanding = 1;
582 return;
583 } else {
584 /*
585 * If the heart beat timeout is called with hb_outstanding set, we
586 * need to take the HBA offline.
587 */
588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
589 "%d:0459 Adapter heartbeat failure, taking "
590 "this port offline.\n", phba->brd_no);
591
592 spin_lock_irq(&phba->hbalock);
593 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
594 spin_unlock_irq(&phba->hbalock);
595
596 lpfc_offline_prep(phba);
597 lpfc_offline(phba);
598 lpfc_unblock_mgmt_io(phba);
599 phba->link_state = LPFC_HBA_ERROR;
600 lpfc_hba_down_post(phba);
601 }
602}
603
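To summarize the heartbeat path added above: lpfc_hb_timeout() runs in timer context and only marks WORKER_HB_TMO and wakes the worker thread; the worker then calls lpfc_hb_timeout_handler(), which re-arms the timer if recent I/O completions already prove the adapter is alive, otherwise issues a HEARTBEAT mailbox (completed by lpfc_hb_mbox_cmpl()), and takes the HBA offline only if a previously issued heartbeat never completed. A short sketch of how the timer is expected to be wired up at adapter attach time elsewhere in this patch (pre-timer_setup() API of this kernel generation; the exact attach-time hunk is not shown here):

	/* Sketch only: attach-time heartbeat timer setup. */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long) phba;
	/* first expiry is armed by lpfc_config_port_post() via mod_timer() */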
480/************************************************************************/ 604/************************************************************************/
481/* */ 605/* */
482/* lpfc_handle_eratt */ 606/* lpfc_handle_eratt */
@@ -486,11 +610,15 @@ lpfc_hba_down_post(struct lpfc_hba * phba)
486/* */ 610/* */
487/************************************************************************/ 611/************************************************************************/
488void 612void
489lpfc_handle_eratt(struct lpfc_hba * phba) 613lpfc_handle_eratt(struct lpfc_hba *phba)
490{ 614{
491 struct lpfc_sli *psli = &phba->sli; 615 struct lpfc_vport *vport = phba->pport;
616 struct lpfc_sli *psli = &phba->sli;
492 struct lpfc_sli_ring *pring; 617 struct lpfc_sli_ring *pring;
618 struct lpfc_vport *port_iterator;
493 uint32_t event_data; 619 uint32_t event_data;
620 struct Scsi_Host *shost;
621
494 /* If the pci channel is offline, ignore possible errors, 622 /* If the pci channel is offline, ignore possible errors,
495 * since we cannot communicate with the pci card anyway. */ 623 * since we cannot communicate with the pci card anyway. */
496 if (pci_channel_offline(phba->pcidev)) 624 if (pci_channel_offline(phba->pcidev))
@@ -504,10 +632,17 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
504 "Data: x%x x%x x%x\n", 632 "Data: x%x x%x x%x\n",
505 phba->brd_no, phba->work_hs, 633 phba->brd_no, phba->work_hs,
506 phba->work_status[0], phba->work_status[1]); 634 phba->work_status[0], phba->work_status[1]);
507 spin_lock_irq(phba->host->host_lock); 635 list_for_each_entry(port_iterator, &phba->port_list,
508 phba->fc_flag |= FC_ESTABLISH_LINK; 636 listentry) {
637 shost = lpfc_shost_from_vport(port_iterator);
638
639 spin_lock_irq(shost->host_lock);
640 port_iterator->fc_flag |= FC_ESTABLISH_LINK;
641 spin_unlock_irq(shost->host_lock);
642 }
643 spin_lock_irq(&phba->hbalock);
509 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 644 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
510 spin_unlock_irq(phba->host->host_lock); 645 spin_unlock_irq(&phba->hbalock);
511 646
512 /* 647 /*
513 * Firmware stops when it triggled erratt with HS_FFER6. 648 * Firmware stops when it triggled erratt with HS_FFER6.
@@ -544,15 +679,18 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
544 phba->work_status[0], phba->work_status[1]); 679 phba->work_status[0], phba->work_status[1]);
545 680
546 event_data = FC_REG_DUMP_EVENT; 681 event_data = FC_REG_DUMP_EVENT;
547 fc_host_post_vendor_event(phba->host, fc_get_event_number(), 682 shost = lpfc_shost_from_vport(vport);
683 fc_host_post_vendor_event(shost, fc_get_event_number(),
548 sizeof(event_data), (char *) &event_data, 684 sizeof(event_data), (char *) &event_data,
549 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 685 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
550 686
687 spin_lock_irq(&phba->hbalock);
551 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 688 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
689 spin_unlock_irq(&phba->hbalock);
552 lpfc_offline_prep(phba); 690 lpfc_offline_prep(phba);
553 lpfc_offline(phba); 691 lpfc_offline(phba);
554 lpfc_unblock_mgmt_io(phba); 692 lpfc_unblock_mgmt_io(phba);
555 phba->hba_state = LPFC_HBA_ERROR; 693 phba->link_state = LPFC_HBA_ERROR;
556 lpfc_hba_down_post(phba); 694 lpfc_hba_down_post(phba);
557 } 695 }
558} 696}
@@ -566,9 +704,11 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
566/* */ 704/* */
567/************************************************************************/ 705/************************************************************************/
568void 706void
569lpfc_handle_latt(struct lpfc_hba * phba) 707lpfc_handle_latt(struct lpfc_hba *phba)
570{ 708{
571 struct lpfc_sli *psli = &phba->sli; 709 struct lpfc_vport *vport = phba->pport;
710 struct lpfc_sli *psli = &phba->sli;
711 struct lpfc_vport *port_iterator;
572 LPFC_MBOXQ_t *pmb; 712 LPFC_MBOXQ_t *pmb;
573 volatile uint32_t control; 713 volatile uint32_t control;
574 struct lpfc_dmabuf *mp; 714 struct lpfc_dmabuf *mp;
@@ -589,20 +729,22 @@ lpfc_handle_latt(struct lpfc_hba * phba)
589 rc = -EIO; 729 rc = -EIO;
590 730
591 /* Cleanup any outstanding ELS commands */ 731 /* Cleanup any outstanding ELS commands */
592 lpfc_els_flush_cmd(phba); 732 list_for_each_entry(port_iterator, &phba->port_list, listentry)
733 lpfc_els_flush_cmd(port_iterator);
593 734
594 psli->slistat.link_event++; 735 psli->slistat.link_event++;
595 lpfc_read_la(phba, pmb, mp); 736 lpfc_read_la(phba, pmb, mp);
596 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 737 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
738 pmb->vport = vport;
597 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); 739 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
598 if (rc == MBX_NOT_FINISHED) 740 if (rc == MBX_NOT_FINISHED)
599 goto lpfc_handle_latt_free_mbuf; 741 goto lpfc_handle_latt_free_mbuf;
600 742
601 /* Clear Link Attention in HA REG */ 743 /* Clear Link Attention in HA REG */
602 spin_lock_irq(phba->host->host_lock); 744 spin_lock_irq(&phba->hbalock);
603 writel(HA_LATT, phba->HAregaddr); 745 writel(HA_LATT, phba->HAregaddr);
604 readl(phba->HAregaddr); /* flush */ 746 readl(phba->HAregaddr); /* flush */
605 spin_unlock_irq(phba->host->host_lock); 747 spin_unlock_irq(&phba->hbalock);
606 748
607 return; 749 return;
608 750
@@ -614,7 +756,7 @@ lpfc_handle_latt_free_pmb:
614 mempool_free(pmb, phba->mbox_mem_pool); 756 mempool_free(pmb, phba->mbox_mem_pool);
615lpfc_handle_latt_err_exit: 757lpfc_handle_latt_err_exit:
616 /* Enable Link attention interrupts */ 758 /* Enable Link attention interrupts */
617 spin_lock_irq(phba->host->host_lock); 759 spin_lock_irq(&phba->hbalock);
618 psli->sli_flag |= LPFC_PROCESS_LA; 760 psli->sli_flag |= LPFC_PROCESS_LA;
619 control = readl(phba->HCregaddr); 761 control = readl(phba->HCregaddr);
620 control |= HC_LAINT_ENA; 762 control |= HC_LAINT_ENA;
@@ -624,15 +766,13 @@ lpfc_handle_latt_err_exit:
624 /* Clear Link Attention in HA REG */ 766 /* Clear Link Attention in HA REG */
625 writel(HA_LATT, phba->HAregaddr); 767 writel(HA_LATT, phba->HAregaddr);
626 readl(phba->HAregaddr); /* flush */ 768 readl(phba->HAregaddr); /* flush */
627 spin_unlock_irq(phba->host->host_lock); 769 spin_unlock_irq(&phba->hbalock);
628 lpfc_linkdown(phba); 770 lpfc_linkdown(phba);
629 phba->hba_state = LPFC_HBA_ERROR; 771 phba->link_state = LPFC_HBA_ERROR;
630 772
631 /* The other case is an error from issue_mbox */ 773 /* The other case is an error from issue_mbox */
632 if (rc == -ENOMEM) 774 if (rc == -ENOMEM)
633 lpfc_printf_log(phba, 775 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
634 KERN_WARNING,
635 LOG_MBOX,
636 "%d:0300 READ_LA: no buffers\n", 776 "%d:0300 READ_LA: no buffers\n",
637 phba->brd_no); 777 phba->brd_no);
638 778
@@ -646,7 +786,7 @@ lpfc_handle_latt_err_exit:
646/* */ 786/* */
647/************************************************************************/ 787/************************************************************************/
648static int 788static int
649lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len) 789lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
650{ 790{
651 uint8_t lenlo, lenhi; 791 uint8_t lenlo, lenhi;
652 int Length; 792 int Length;
@@ -658,9 +798,7 @@ lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
658 return 0; 798 return 0;
659 799
660 /* Vital Product */ 800 /* Vital Product */
661 lpfc_printf_log(phba, 801 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
662 KERN_INFO,
663 LOG_INIT,
664 "%d:0455 Vital Product Data: x%x x%x x%x x%x\n", 802 "%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
665 phba->brd_no, 803 phba->brd_no,
666 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 804 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
@@ -785,7 +923,7 @@ lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
785} 923}
786 924
787static void 925static void
788lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) 926lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
789{ 927{
790 lpfc_vpd_t *vp; 928 lpfc_vpd_t *vp;
791 uint16_t dev_id = phba->pcidev->device; 929 uint16_t dev_id = phba->pcidev->device;
@@ -943,7 +1081,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
943/* Returns the number of buffers NOT posted. */ 1081/* Returns the number of buffers NOT posted. */
944/**************************************************/ 1082/**************************************************/
945int 1083int
946lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, 1084lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
947 int type) 1085 int type)
948{ 1086{
949 IOCB_t *icmd; 1087 IOCB_t *icmd;
@@ -955,9 +1093,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
955 /* While there are buffers to post */ 1093 /* While there are buffers to post */
956 while (cnt > 0) { 1094 while (cnt > 0) {
957 /* Allocate buffer for command iocb */ 1095 /* Allocate buffer for command iocb */
958 spin_lock_irq(phba->host->host_lock);
959 iocb = lpfc_sli_get_iocbq(phba); 1096 iocb = lpfc_sli_get_iocbq(phba);
960 spin_unlock_irq(phba->host->host_lock);
961 if (iocb == NULL) { 1097 if (iocb == NULL) {
962 pring->missbufcnt = cnt; 1098 pring->missbufcnt = cnt;
963 return cnt; 1099 return cnt;
@@ -972,9 +1108,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
972 &mp1->phys); 1108 &mp1->phys);
973 if (mp1 == 0 || mp1->virt == 0) { 1109 if (mp1 == 0 || mp1->virt == 0) {
974 kfree(mp1); 1110 kfree(mp1);
975 spin_lock_irq(phba->host->host_lock);
976 lpfc_sli_release_iocbq(phba, iocb); 1111 lpfc_sli_release_iocbq(phba, iocb);
977 spin_unlock_irq(phba->host->host_lock);
978 pring->missbufcnt = cnt; 1112 pring->missbufcnt = cnt;
979 return cnt; 1113 return cnt;
980 } 1114 }
@@ -990,9 +1124,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
990 kfree(mp2); 1124 kfree(mp2);
991 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1125 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
992 kfree(mp1); 1126 kfree(mp1);
993 spin_lock_irq(phba->host->host_lock);
994 lpfc_sli_release_iocbq(phba, iocb); 1127 lpfc_sli_release_iocbq(phba, iocb);
995 spin_unlock_irq(phba->host->host_lock);
996 pring->missbufcnt = cnt; 1128 pring->missbufcnt = cnt;
997 return cnt; 1129 return cnt;
998 } 1130 }
@@ -1018,7 +1150,6 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
1018 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1150 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1019 icmd->ulpLe = 1; 1151 icmd->ulpLe = 1;
1020 1152
1021 spin_lock_irq(phba->host->host_lock);
1022 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1153 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
1023 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1154 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1024 kfree(mp1); 1155 kfree(mp1);
@@ -1030,14 +1161,11 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
1030 } 1161 }
1031 lpfc_sli_release_iocbq(phba, iocb); 1162 lpfc_sli_release_iocbq(phba, iocb);
1032 pring->missbufcnt = cnt; 1163 pring->missbufcnt = cnt;
1033 spin_unlock_irq(phba->host->host_lock);
1034 return cnt; 1164 return cnt;
1035 } 1165 }
1036 spin_unlock_irq(phba->host->host_lock);
1037 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1166 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1038 if (mp2) { 1167 if (mp2)
1039 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1168 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1040 }
1041 } 1169 }
1042 pring->missbufcnt = 0; 1170 pring->missbufcnt = 0;
1043 return 0; 1171 return 0;
@@ -1050,7 +1178,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
1050/* */ 1178/* */
1051/************************************************************************/ 1179/************************************************************************/
1052static int 1180static int
1053lpfc_post_rcv_buf(struct lpfc_hba * phba) 1181lpfc_post_rcv_buf(struct lpfc_hba *phba)
1054{ 1182{
1055 struct lpfc_sli *psli = &phba->sli; 1183 struct lpfc_sli *psli = &phba->sli;
1056 1184
@@ -1151,7 +1279,7 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1151{ 1279{
1152 int t; 1280 int t;
1153 uint32_t *HashWorking; 1281 uint32_t *HashWorking;
1154 uint32_t *pwwnn = phba->wwnn; 1282 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1155 1283
1156 HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL); 1284 HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
1157 if (!HashWorking) 1285 if (!HashWorking)
@@ -1170,64 +1298,76 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1170} 1298}
1171 1299
1172static void 1300static void
1173lpfc_cleanup(struct lpfc_hba * phba) 1301lpfc_cleanup(struct lpfc_vport *vport)
1174{ 1302{
1175 struct lpfc_nodelist *ndlp, *next_ndlp; 1303 struct lpfc_nodelist *ndlp, *next_ndlp;
1176 1304
1177 /* clean up phba - lpfc specific */ 1305 /* clean up phba - lpfc specific */
1178 lpfc_can_disctmo(phba); 1306 lpfc_can_disctmo(vport);
1179 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) 1307 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
1180 lpfc_nlp_put(ndlp); 1308 lpfc_nlp_put(ndlp);
1181
1182 INIT_LIST_HEAD(&phba->fc_nodes);
1183
1184 return; 1309 return;
1185} 1310}
1186 1311
1187static void 1312static void
1188lpfc_establish_link_tmo(unsigned long ptr) 1313lpfc_establish_link_tmo(unsigned long ptr)
1189{ 1314{
1190 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 1315 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
1316 struct lpfc_vport *vport = phba->pport;
1191 unsigned long iflag; 1317 unsigned long iflag;
1192 1318
1193
1194 /* Re-establishing Link, timer expired */ 1319 /* Re-establishing Link, timer expired */
1195 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1320 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1196 "%d:1300 Re-establishing Link, timer expired " 1321 "%d:1300 Re-establishing Link, timer expired "
1197 "Data: x%x x%x\n", 1322 "Data: x%x x%x\n",
1198 phba->brd_no, phba->fc_flag, phba->hba_state); 1323 phba->brd_no, vport->fc_flag,
1199 spin_lock_irqsave(phba->host->host_lock, iflag); 1324 vport->port_state);
1200 phba->fc_flag &= ~FC_ESTABLISH_LINK; 1325 list_for_each_entry(vport, &phba->port_list, listentry) {
1201 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1326 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1327
1328 spin_lock_irqsave(shost->host_lock, iflag);
1329 vport->fc_flag &= ~FC_ESTABLISH_LINK;
1330 spin_unlock_irqrestore(shost->host_lock, iflag);
1331 }
1202} 1332}
1203 1333
1204static int 1334void
1205lpfc_stop_timer(struct lpfc_hba * phba) 1335lpfc_stop_vport_timers(struct lpfc_vport *vport)
1206{ 1336{
1207 struct lpfc_sli *psli = &phba->sli; 1337 del_timer_sync(&vport->els_tmofunc);
1338 del_timer_sync(&vport->fc_fdmitmo);
1339 lpfc_can_disctmo(vport);
1340 return;
1341}
1342
1343static void
1344lpfc_stop_phba_timers(struct lpfc_hba *phba)
1345{
1346 struct lpfc_vport *vport;
1208 1347
1209 del_timer_sync(&phba->fcp_poll_timer); 1348 del_timer_sync(&phba->fcp_poll_timer);
1210 del_timer_sync(&phba->fc_estabtmo); 1349 del_timer_sync(&phba->fc_estabtmo);
1211 del_timer_sync(&phba->fc_disctmo); 1350 list_for_each_entry(vport, &phba->port_list, listentry)
1212 del_timer_sync(&phba->fc_fdmitmo); 1351 lpfc_stop_vport_timers(vport);
1213 del_timer_sync(&phba->els_tmofunc); 1352 del_timer_sync(&phba->sli.mbox_tmo);
1214 psli = &phba->sli; 1353 del_timer_sync(&phba->fabric_block_timer);
1215 del_timer_sync(&psli->mbox_tmo); 1354 phba->hb_outstanding = 0;
1216 return(1); 1355 del_timer_sync(&phba->hb_tmofunc);
1356 return;
1217} 1357}
1218 1358
1219int 1359int
1220lpfc_online(struct lpfc_hba * phba) 1360lpfc_online(struct lpfc_hba *phba)
1221{ 1361{
1362 struct lpfc_vport *vport = phba->pport;
1363
1222 if (!phba) 1364 if (!phba)
1223 return 0; 1365 return 0;
1224 1366
1225 if (!(phba->fc_flag & FC_OFFLINE_MODE)) 1367 if (!(vport->fc_flag & FC_OFFLINE_MODE))
1226 return 0; 1368 return 0;
1227 1369
1228 lpfc_printf_log(phba, 1370 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1229 KERN_WARNING,
1230 LOG_INIT,
1231 "%d:0458 Bring Adapter online\n", 1371 "%d:0458 Bring Adapter online\n",
1232 phba->brd_no); 1372 phba->brd_no);
1233 1373
@@ -1243,9 +1383,14 @@ lpfc_online(struct lpfc_hba * phba)
1243 return 1; 1383 return 1;
1244 } 1384 }
1245 1385
1246 spin_lock_irq(phba->host->host_lock); 1386 list_for_each_entry(vport, &phba->port_list, listentry) {
1247 phba->fc_flag &= ~FC_OFFLINE_MODE; 1387 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1248 spin_unlock_irq(phba->host->host_lock); 1388 spin_lock_irq(shost->host_lock);
1389 vport->fc_flag &= ~FC_OFFLINE_MODE;
1390 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
1391 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1392 spin_unlock_irq(shost->host_lock);
1393 }
1249 1394
1250 lpfc_unblock_mgmt_io(phba); 1395 lpfc_unblock_mgmt_io(phba);
1251 return 0; 1396 return 0;
@@ -1256,9 +1401,9 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
1256{ 1401{
1257 unsigned long iflag; 1402 unsigned long iflag;
1258 1403
1259 spin_lock_irqsave(phba->host->host_lock, iflag); 1404 spin_lock_irqsave(&phba->hbalock, iflag);
1260 phba->fc_flag |= FC_BLOCK_MGMT_IO; 1405 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
1261 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1406 spin_unlock_irqrestore(&phba->hbalock, iflag);
1262} 1407}
1263 1408
1264void 1409void
@@ -1266,17 +1411,18 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
1266{ 1411{
1267 unsigned long iflag; 1412 unsigned long iflag;
1268 1413
1269 spin_lock_irqsave(phba->host->host_lock, iflag); 1414 spin_lock_irqsave(&phba->hbalock, iflag);
1270 phba->fc_flag &= ~FC_BLOCK_MGMT_IO; 1415 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
1271 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1416 spin_unlock_irqrestore(&phba->hbalock, iflag);
1272} 1417}
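The two helpers above move the management-I/O gate from the per-host fc_flag (under host_lock) to the SLI layer's sli_flag (under the new hbalock). A minimal sketch, assuming only the flag and lock names visible in this hunk, of how a management path could test the gate before touching the mailbox interface; the helper name is illustrative, not part of the driver:

	/* Illustrative helper: returns nonzero when management commands
	 * may be issued, i.e. LPFC_BLOCK_MGMT_IO is clear. */
	static int example_mgmt_io_allowed(struct lpfc_hba *phba)
	{
		unsigned long iflag;
		int blocked;

		spin_lock_irqsave(&phba->hbalock, iflag);
		blocked = (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) != 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return !blocked;
	}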
1273 1418
1274void 1419void
1275lpfc_offline_prep(struct lpfc_hba * phba) 1420lpfc_offline_prep(struct lpfc_hba * phba)
1276{ 1421{
1422 struct lpfc_vport *vport = phba->pport;
1277 struct lpfc_nodelist *ndlp, *next_ndlp; 1423 struct lpfc_nodelist *ndlp, *next_ndlp;
1278 1424
1279 if (phba->fc_flag & FC_OFFLINE_MODE) 1425 if (vport->fc_flag & FC_OFFLINE_MODE)
1280 return; 1426 return;
1281 1427
1282 lpfc_block_mgmt_io(phba); 1428 lpfc_block_mgmt_io(phba);
@@ -1284,39 +1430,49 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1284 lpfc_linkdown(phba); 1430 lpfc_linkdown(phba);
1285 1431
1286 /* Issue an unreg_login to all nodes */ 1432 /* Issue an unreg_login to all nodes */
1287 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) 1433 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
1288 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) 1434 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
1289 lpfc_unreg_rpi(phba, ndlp); 1435 lpfc_unreg_rpi(vport, ndlp);
1290 1436
1291 lpfc_sli_flush_mbox_queue(phba); 1437 lpfc_sli_flush_mbox_queue(phba);
1292} 1438}
1293 1439
1294void 1440void
1295lpfc_offline(struct lpfc_hba * phba) 1441lpfc_offline(struct lpfc_hba *phba)
1296{ 1442{
1297 unsigned long iflag; 1443 struct lpfc_vport *vport = phba->pport;
1444 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1445 struct lpfc_vport *port_iterator;
1298 1446
1299 if (phba->fc_flag & FC_OFFLINE_MODE) 1447 if (vport->fc_flag & FC_OFFLINE_MODE)
1300 return; 1448 return;
1301 1449
1302 /* stop all timers associated with this hba */ 1450 /* stop all timers associated with this hba */
1303 lpfc_stop_timer(phba); 1451 lpfc_stop_phba_timers(phba);
1452 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
1453 port_iterator->work_port_events = 0;
1454 }
1304 1455
1305 lpfc_printf_log(phba, 1456 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1306 KERN_WARNING,
1307 LOG_INIT,
1308 "%d:0460 Bring Adapter offline\n", 1457 "%d:0460 Bring Adapter offline\n",
1309 phba->brd_no); 1458 phba->brd_no);
1310 1459
1311 /* Bring down the SLI Layer and cleanup. The HBA is offline 1460 /* Bring down the SLI Layer and cleanup. The HBA is offline
1312 now. */ 1461 now. */
1313 lpfc_sli_hba_down(phba); 1462 lpfc_sli_hba_down(phba);
1314 lpfc_cleanup(phba); 1463 spin_lock_irq(&phba->hbalock);
1315 spin_lock_irqsave(phba->host->host_lock, iflag);
1316 phba->work_hba_events = 0;
1317 phba->work_ha = 0; 1464 phba->work_ha = 0;
1318 phba->fc_flag |= FC_OFFLINE_MODE; 1465 vport->fc_flag |= FC_OFFLINE_MODE;
1319 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1466 spin_unlock_irq(&phba->hbalock);
1467 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
1468 shost = lpfc_shost_from_vport(port_iterator);
1469
1470 lpfc_cleanup(port_iterator);
1471 spin_lock_irq(shost->host_lock);
1472 vport->work_port_events = 0;
1473 vport->fc_flag |= FC_OFFLINE_MODE;
1474 spin_unlock_irq(shost->host_lock);
1475 }
1320} 1476}
1321 1477
1322/****************************************************************************** 1478/******************************************************************************
@@ -1326,17 +1482,17 @@ lpfc_offline(struct lpfc_hba * phba)
1326* 1482*
1327******************************************************************************/ 1483******************************************************************************/
1328static int 1484static int
1329lpfc_scsi_free(struct lpfc_hba * phba) 1485lpfc_scsi_free(struct lpfc_hba *phba)
1330{ 1486{
1331 struct lpfc_scsi_buf *sb, *sb_next; 1487 struct lpfc_scsi_buf *sb, *sb_next;
1332 struct lpfc_iocbq *io, *io_next; 1488 struct lpfc_iocbq *io, *io_next;
1333 1489
1334 spin_lock_irq(phba->host->host_lock); 1490 spin_lock_irq(&phba->hbalock);
1335 /* Release all the lpfc_scsi_bufs maintained by this host. */ 1491 /* Release all the lpfc_scsi_bufs maintained by this host. */
1336 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 1492 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
1337 list_del(&sb->list); 1493 list_del(&sb->list);
1338 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 1494 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
1339 sb->dma_handle); 1495 sb->dma_handle);
1340 kfree(sb); 1496 kfree(sb);
1341 phba->total_scsi_bufs--; 1497 phba->total_scsi_bufs--;
1342 } 1498 }
@@ -1348,134 +1504,183 @@ lpfc_scsi_free(struct lpfc_hba * phba)
1348 phba->total_iocbq_bufs--; 1504 phba->total_iocbq_bufs--;
1349 } 1505 }
1350 1506
1351 spin_unlock_irq(phba->host->host_lock); 1507 spin_unlock_irq(&phba->hbalock);
1352 1508
1353 return 0; 1509 return 0;
1354} 1510}
1355 1511
1356void lpfc_remove_device(struct lpfc_hba *phba)
1357{
1358 unsigned long iflag;
1359 1512
1360 lpfc_free_sysfs_attr(phba); 1513struct lpfc_vport *
1361 1514lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport)
1362 spin_lock_irqsave(phba->host->host_lock, iflag); 1515{
1363 phba->fc_flag |= FC_UNLOADING; 1516 struct lpfc_vport *vport;
1517 struct Scsi_Host *shost;
1518 int error = 0;
1364 1519
1365 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1520 shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
1521 if (!shost)
1522 goto out;
1366 1523
1367 fc_remove_host(phba->host); 1524 vport = (struct lpfc_vport *) shost->hostdata;
1368 scsi_remove_host(phba->host); 1525 vport->phba = phba;
1369 1526
1370 kthread_stop(phba->worker_thread); 1527 vport->load_flag |= FC_LOADING;
1528 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1371 1529
1530 shost->unique_id = instance;
1531 shost->max_id = LPFC_MAX_TARGET;
1532 shost->max_lun = phba->cfg_max_luns;
1533 shost->this_id = -1;
1534 shost->max_cmd_len = 16;
1372 /* 1535 /*
1373 * Bring down the SLI Layer. This step disable all interrupts, 1536 * Set initial can_queue value since 0 is no longer supported and
1374 * clears the rings, discards all mailbox commands, and resets 1537 * scsi_add_host will fail. This will be adjusted later based on the
1375 * the HBA. 1538 * max xri value determined in hba setup.
1376 */ 1539 */
1377 lpfc_sli_hba_down(phba); 1540 shost->can_queue = phba->cfg_hba_queue_depth - 10;
1378 lpfc_sli_brdrestart(phba); 1541 if (fc_vport != NULL) {
1542 shost->transportt = lpfc_vport_transport_template;
1543 vport->port_type = LPFC_NPIV_PORT;
1544 } else {
1545 shost->transportt = lpfc_transport_template;
1546 vport->port_type = LPFC_PHYSICAL_PORT;
1547 }
1379 1548
1380 /* Release the irq reservation */ 1549 /* Initialize all internally managed lists. */
1381 free_irq(phba->pcidev->irq, phba); 1550 INIT_LIST_HEAD(&vport->fc_nodes);
1382 pci_disable_msi(phba->pcidev); 1551 spin_lock_init(&vport->work_port_lock);
1383 1552
1384 lpfc_cleanup(phba); 1553 init_timer(&vport->fc_disctmo);
1385 lpfc_stop_timer(phba); 1554 vport->fc_disctmo.function = lpfc_disc_timeout;
1386 phba->work_hba_events = 0; 1555 vport->fc_disctmo.data = (unsigned long)vport;
1387 1556
1388 /* 1557 init_timer(&vport->fc_fdmitmo);
1389 * Call scsi_free before mem_free since scsi bufs are released to their 1558 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
1390 * corresponding pools here. 1559 vport->fc_fdmitmo.data = (unsigned long)vport;
1391 */
1392 lpfc_scsi_free(phba);
1393 lpfc_mem_free(phba);
1394 1560
1395 /* Free resources associated with SLI2 interface */ 1561 init_timer(&vport->els_tmofunc);
1396 dma_free_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, 1562 vport->els_tmofunc.function = lpfc_els_timeout;
1397 phba->slim2p, phba->slim2p_mapping); 1563 vport->els_tmofunc.data = (unsigned long)vport;
1398 1564
1399 /* unmap adapter SLIM and Control Registers */ 1565 if (fc_vport != NULL) {
1400 iounmap(phba->ctrl_regs_memmap_p); 1566 error = scsi_add_host(shost, &fc_vport->dev);
1401 iounmap(phba->slim_memmap_p); 1567 } else {
1568 error = scsi_add_host(shost, &phba->pcidev->dev);
1569 }
1570 if (error)
1571 goto out_put_shost;
1402 1572
1403 pci_release_regions(phba->pcidev); 1573 list_add_tail(&vport->listentry, &phba->port_list);
1404 pci_disable_device(phba->pcidev); 1574 return vport;
1405 1575
1406 idr_remove(&lpfc_hba_index, phba->brd_no); 1576out_put_shost:
1407 scsi_host_put(phba->host); 1577 scsi_host_put(shost);
1578out:
1579 return NULL;
1408} 1580}
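A short usage sketch for the new port constructor/destructor pair. The physical port case is the probe path further down, which calls lpfc_create_port(phba, phba->brd_no, NULL); the NPIV case assumes the fc_vport handle arrives from the FC transport's vport-create callback and that each port gets its own instance number. Error handling is trimmed and the wrapper name is illustrative:

	static struct lpfc_vport *example_npiv_create(struct lpfc_hba *phba,
						      struct fc_vport *fc_vport)
	{
		struct lpfc_vport *vport;
		int inst = lpfc_get_instance();

		if (inst < 0)
			return NULL;
		vport = lpfc_create_port(phba, inst, fc_vport);
		if (vport)
			lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
		return vport;		/* destroy_port() undoes this */
	}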
1409 1581
1410void lpfc_scan_start(struct Scsi_Host *host) 1582void
1583destroy_port(struct lpfc_vport *vport)
1411{ 1584{
1412 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1585 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1586 struct lpfc_hba *phba = vport->phba;
1413 1587
1414 if (lpfc_alloc_sysfs_attr(phba)) 1588 kfree(vport->vname);
1415 goto error;
1416 1589
1417 phba->MBslimaddr = phba->slim_memmap_p; 1590 lpfc_debugfs_terminate(vport);
1418 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 1591 fc_remove_host(shost);
1419 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 1592 scsi_remove_host(shost);
1420 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1421 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1422 1593
1423 if (lpfc_sli_hba_setup(phba)) 1594 spin_lock_irq(&phba->hbalock);
1424 goto error; 1595 list_del_init(&vport->listentry);
1596 spin_unlock_irq(&phba->hbalock);
1425 1597
1426 /* 1598 lpfc_cleanup(vport);
1427 * hba setup may have changed the hba_queue_depth so we need to adjust
1428 * the value of can_queue.
1429 */
1430 host->can_queue = phba->cfg_hba_queue_depth - 10;
1431 return; 1599 return;
1600}
1601
1602int
1603lpfc_get_instance(void)
1604{
1605 int instance = 0;
1432 1606
1433error: 1607 /* Assign an unused number */
1434 lpfc_remove_device(phba); 1608 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
1609 return -1;
1610 if (idr_get_new(&lpfc_hba_index, NULL, &instance))
1611 return -1;
1612 return instance;
1435} 1613}
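lpfc_get_instance() above uses the two-step idr interface of this kernel generation: idr_pre_get() reserves memory, idr_get_new() hands out the id, and the id must eventually be returned with idr_remove(), which the remove path does. A compressed sketch of that pairing:

	int id = -1;

	if (idr_pre_get(&lpfc_hba_index, GFP_KERNEL) &&
	    !idr_get_new(&lpfc_hba_index, NULL, &id)) {
		/* ... use id as the board / SCSI host instance number ... */
		idr_remove(&lpfc_hba_index, id);	/* on teardown */
	}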
1436 1614
1615/*
1616 * Note: there is no scan_start function as adapter initialization
1617 * will have asynchronously kicked off the link initialization.
1618 */
1619
1437int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 1620int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
1438{ 1621{
1439 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1622 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1623 struct lpfc_hba *phba = vport->phba;
1624 int stat = 0;
1440 1625
1441 if (!phba->host) 1626 spin_lock_irq(shost->host_lock);
1442 return 1; 1627
1443 if (time >= 30 * HZ) 1628 if (vport->fc_flag & FC_UNLOADING) {
1629 stat = 1;
1630 goto finished;
1631 }
1632 if (time >= 30 * HZ) {
1633 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1634 "%d:0461 Scanning longer than 30 "
1635 "seconds. Continuing initialization\n",
1636 phba->brd_no);
1637 stat = 1;
1638 goto finished;
1639 }
1640 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
1641 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1642 "%d:0465 Link down longer than 15 "
1643 "seconds. Continuing initialization\n",
1644 phba->brd_no);
1645 stat = 1;
1444 goto finished; 1646 goto finished;
1647 }
1445 1648
1446 if (phba->hba_state != LPFC_HBA_READY) 1649 if (vport->port_state != LPFC_VPORT_READY)
1447 return 0; 1650 goto finished;
1448 if (phba->num_disc_nodes || phba->fc_prli_sent) 1651 if (vport->num_disc_nodes || vport->fc_prli_sent)
1449 return 0; 1652 goto finished;
1450 if ((phba->fc_map_cnt == 0) && (time < 2 * HZ)) 1653 if (vport->fc_map_cnt == 0 && time < 2 * HZ)
1451 return 0; 1654 goto finished;
1452 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) 1655 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
1453 return 0; 1656 goto finished;
1454 if ((phba->hba_state > LPFC_LINK_DOWN) || (time < 15 * HZ)) 1657
1455 return 0; 1658 stat = 1;
1456 1659
1457finished: 1660finished:
1458 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1661 spin_unlock_irq(shost->host_lock);
1459 spin_lock_irq(shost->host_lock); 1662 return stat;
1460 lpfc_poll_start_timer(phba); 1663}
1461 spin_unlock_irq(shost->host_lock);
1462 }
1463 1664
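lpfc_scan_finished() is the midlayer's async-scan poll hook: scsi_scan_host() keeps calling it with the elapsed time in jiffies until it returns nonzero, which is why the function encodes the 30-second, 15-second-link-down and 2-second-no-targets cutoffs rather than blocking. A sketch of how such a hook is wired into a host template; only the .scan_finished assignment reflects this patch, the other fields are illustrative:

	static struct scsi_host_template example_template = {
		.module		= THIS_MODULE,
		.name		= "example",
		/* polled by scsi_scan_host() with jiffies elapsed since scan start */
		.scan_finished	= lpfc_scan_finished,
		/* no .scan_start: link bring-up was already kicked off at probe time */
	};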
1665void lpfc_host_attrib_init(struct Scsi_Host *shost)
1666{
1667 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1668 struct lpfc_hba *phba = vport->phba;
1464 /* 1669 /*
1465 * set fixed host attributes 1670 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
1466 * Must done after lpfc_sli_hba_setup()
1467 */ 1671 */
1468 1672
1469 fc_host_node_name(shost) = wwn_to_u64(phba->fc_nodename.u.wwn); 1673 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
1470 fc_host_port_name(shost) = wwn_to_u64(phba->fc_portname.u.wwn); 1674 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
1471 fc_host_supported_classes(shost) = FC_COS_CLASS3; 1675 fc_host_supported_classes(shost) = FC_COS_CLASS3;
1472 1676
1473 memset(fc_host_supported_fc4s(shost), 0, 1677 memset(fc_host_supported_fc4s(shost), 0,
1474 sizeof(fc_host_supported_fc4s(shost))); 1678 sizeof(fc_host_supported_fc4s(shost)));
1475 fc_host_supported_fc4s(shost)[2] = 1; 1679 fc_host_supported_fc4s(shost)[2] = 1;
1476 fc_host_supported_fc4s(shost)[7] = 1; 1680 fc_host_supported_fc4s(shost)[7] = 1;
1477 1681
1478 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost)); 1682 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
1683 sizeof fc_host_symbolic_name(shost));
1479 1684
1480 fc_host_supported_speeds(shost) = 0; 1685 fc_host_supported_speeds(shost) = 0;
1481 if (phba->lmt & LMT_10Gb) 1686 if (phba->lmt & LMT_10Gb)
@@ -1488,31 +1693,31 @@ finished:
1488 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 1693 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
1489 1694
1490 fc_host_maxframe_size(shost) = 1695 fc_host_maxframe_size(shost) =
1491 ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 1696 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
1492 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb); 1697 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
1493 1698
1494 /* This value is also unchanging */ 1699 /* This value is also unchanging */
1495 memset(fc_host_active_fc4s(shost), 0, 1700 memset(fc_host_active_fc4s(shost), 0,
1496 sizeof(fc_host_active_fc4s(shost))); 1701 sizeof(fc_host_active_fc4s(shost)));
1497 fc_host_active_fc4s(shost)[2] = 1; 1702 fc_host_active_fc4s(shost)[2] = 1;
1498 fc_host_active_fc4s(shost)[7] = 1; 1703 fc_host_active_fc4s(shost)[7] = 1;
1499 1704
1705 fc_host_max_npiv_vports(shost) = phba->max_vpi;
1500 spin_lock_irq(shost->host_lock); 1706 spin_lock_irq(shost->host_lock);
1501 phba->fc_flag &= ~FC_LOADING; 1707 vport->fc_flag &= ~FC_LOADING;
1502 spin_unlock_irq(shost->host_lock); 1708 spin_unlock_irq(shost->host_lock);
1503
1504 return 1;
1505} 1709}
1506 1710
1507static int __devinit 1711static int __devinit
1508lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1712lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1509{ 1713{
1510 struct Scsi_Host *host; 1714 struct lpfc_vport *vport = NULL;
1511 struct lpfc_hba *phba; 1715 struct lpfc_hba *phba;
1512 struct lpfc_sli *psli; 1716 struct lpfc_sli *psli;
1513 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 1717 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
1718 struct Scsi_Host *shost = NULL;
1514 unsigned long bar0map_len, bar2map_len; 1719 unsigned long bar0map_len, bar2map_len;
1515 int error = -ENODEV, retval; 1720 int error = -ENODEV;
1516 int i; 1721 int i;
1517 uint16_t iotag; 1722 uint16_t iotag;
1518 1723
@@ -1521,61 +1726,46 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1521 if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) 1726 if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
1522 goto out_disable_device; 1727 goto out_disable_device;
1523 1728
1524 host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba)); 1729 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
1525 if (!host) 1730 if (!phba)
1526 goto out_release_regions; 1731 goto out_release_regions;
1527 1732
1528 phba = (struct lpfc_hba*)host->hostdata; 1733 spin_lock_init(&phba->hbalock);
1529 memset(phba, 0, sizeof (struct lpfc_hba));
1530 phba->host = host;
1531 1734
1532 phba->fc_flag |= FC_LOADING;
1533 phba->pcidev = pdev; 1735 phba->pcidev = pdev;
1534 1736
1535 /* Assign an unused board number */ 1737 /* Assign an unused board number */
1536 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 1738 if ((phba->brd_no = lpfc_get_instance()) < 0)
1537 goto out_put_host; 1739 goto out_free_phba;
1538 1740
1539 error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no); 1741 INIT_LIST_HEAD(&phba->port_list);
1540 if (error) 1742 INIT_LIST_HEAD(&phba->hbq_buffer_list);
1541 goto out_put_host; 1743 /*
1542 1744 * Get all the module params for configuring this host and then
1543 host->unique_id = phba->brd_no; 1745 * establish the host.
1746 */
1747 lpfc_get_cfgparam(phba);
1748 phba->max_vpi = LPFC_MAX_VPI;
1544 1749
1545 /* Initialize timers used by driver */ 1750 /* Initialize timers used by driver */
1546 init_timer(&phba->fc_estabtmo); 1751 init_timer(&phba->fc_estabtmo);
1547 phba->fc_estabtmo.function = lpfc_establish_link_tmo; 1752 phba->fc_estabtmo.function = lpfc_establish_link_tmo;
1548 phba->fc_estabtmo.data = (unsigned long)phba; 1753 phba->fc_estabtmo.data = (unsigned long)phba;
1549 init_timer(&phba->fc_disctmo); 1754
1550 phba->fc_disctmo.function = lpfc_disc_timeout; 1755 init_timer(&phba->hb_tmofunc);
1551 phba->fc_disctmo.data = (unsigned long)phba; 1756 phba->hb_tmofunc.function = lpfc_hb_timeout;
1552 1757 phba->hb_tmofunc.data = (unsigned long)phba;
1553 init_timer(&phba->fc_fdmitmo); 1758
1554 phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
1555 phba->fc_fdmitmo.data = (unsigned long)phba;
1556 init_timer(&phba->els_tmofunc);
1557 phba->els_tmofunc.function = lpfc_els_timeout;
1558 phba->els_tmofunc.data = (unsigned long)phba;
1559 psli = &phba->sli; 1759 psli = &phba->sli;
1560 init_timer(&psli->mbox_tmo); 1760 init_timer(&psli->mbox_tmo);
1561 psli->mbox_tmo.function = lpfc_mbox_timeout; 1761 psli->mbox_tmo.function = lpfc_mbox_timeout;
1562 psli->mbox_tmo.data = (unsigned long)phba; 1762 psli->mbox_tmo.data = (unsigned long) phba;
1563
1564 init_timer(&phba->fcp_poll_timer); 1763 init_timer(&phba->fcp_poll_timer);
1565 phba->fcp_poll_timer.function = lpfc_poll_timeout; 1764 phba->fcp_poll_timer.function = lpfc_poll_timeout;
1566 phba->fcp_poll_timer.data = (unsigned long)phba; 1765 phba->fcp_poll_timer.data = (unsigned long) phba;
1567 1766 init_timer(&phba->fabric_block_timer);
1568 /* 1767 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
1569 * Get all the module params for configuring this host and then 1768 phba->fabric_block_timer.data = (unsigned long) phba;
1570 * establish the host parameters.
1571 */
1572 lpfc_get_cfgparam(phba);
1573
1574 host->max_id = LPFC_MAX_TARGET;
1575 host->max_lun = phba->cfg_max_luns;
1576 host->this_id = -1;
1577
1578 INIT_LIST_HEAD(&phba->fc_nodes);
1579 1769
1580 pci_set_master(pdev); 1770 pci_set_master(pdev);
1581 pci_try_set_mwi(pdev); 1771 pci_try_set_mwi(pdev);
@@ -1620,13 +1810,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1620 1810
1621 memset(phba->slim2p, 0, SLI2_SLIM_SIZE); 1811 memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
1622 1812
1813 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
1814 lpfc_sli_hbq_size(),
1815 &phba->hbqslimp.phys,
1816 GFP_KERNEL);
1817 if (!phba->hbqslimp.virt)
1818 goto out_free_slim;
1819
1820 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
1821
1623 /* Initialize the SLI Layer to run with lpfc HBAs. */ 1822 /* Initialize the SLI Layer to run with lpfc HBAs. */
1624 lpfc_sli_setup(phba); 1823 lpfc_sli_setup(phba);
1625 lpfc_sli_queue_setup(phba); 1824 lpfc_sli_queue_setup(phba);
1626 1825
1627 error = lpfc_mem_alloc(phba); 1826 error = lpfc_mem_alloc(phba);
1628 if (error) 1827 if (error)
1629 goto out_free_slim; 1828 goto out_free_hbqslimp;
1630 1829
1631 /* Initialize and populate the iocb list per host. */ 1830 /* Initialize and populate the iocb list per host. */
1632 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 1831 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
@@ -1650,10 +1849,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1650 error = -ENOMEM; 1849 error = -ENOMEM;
1651 goto out_free_iocbq; 1850 goto out_free_iocbq;
1652 } 1851 }
1653 spin_lock_irq(phba->host->host_lock); 1852
1853 spin_lock_irq(&phba->hbalock);
1654 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 1854 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
1655 phba->total_iocbq_bufs++; 1855 phba->total_iocbq_bufs++;
1656 spin_unlock_irq(phba->host->host_lock); 1856 spin_unlock_irq(&phba->hbalock);
1657 } 1857 }
1658 1858
1659 /* Initialize HBA structure */ 1859 /* Initialize HBA structure */
@@ -1674,22 +1874,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1674 goto out_free_iocbq; 1874 goto out_free_iocbq;
1675 } 1875 }
1676 1876
1677 /*
1678 * Set initial can_queue value since 0 is no longer supported and
1679 * scsi_add_host will fail. This will be adjusted later based on the
1680 * max xri value determined in hba setup.
1681 */
1682 host->can_queue = phba->cfg_hba_queue_depth - 10;
1683
1684 /* Tell the midlayer we support 16 byte commands */
1685 host->max_cmd_len = 16;
1686
1687 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 1877 /* Initialize the list of scsi buffers used by driver for scsi IO. */
1688 spin_lock_init(&phba->scsi_buf_list_lock); 1878 spin_lock_init(&phba->scsi_buf_list_lock);
1689 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 1879 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
1690 1880
1691 host->transportt = lpfc_transport_template; 1881 /* Initialize list of fabric iocbs */
1692 pci_set_drvdata(pdev, host); 1882 INIT_LIST_HEAD(&phba->fabric_iocb_list);
1883
1884 vport = lpfc_create_port(phba, phba->brd_no, NULL);
1885 if (!vport)
1886 goto out_kthread_stop;
1887
1888 shost = lpfc_shost_from_vport(vport);
1889 phba->pport = vport;
1890 lpfc_debugfs_initialize(vport);
1891
1892 pci_set_drvdata(pdev, shost);
1693 1893
1694 if (phba->cfg_use_msi) { 1894 if (phba->cfg_use_msi) {
1695 error = pci_enable_msi(phba->pcidev); 1895 error = pci_enable_msi(phba->pcidev);
@@ -1700,38 +1900,68 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1700 } 1900 }
1701 1901
1702 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, 1902 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
1703 LPFC_DRIVER_NAME, phba); 1903 LPFC_DRIVER_NAME, phba);
1704 if (error) { 1904 if (error) {
1705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1706 "%d:0451 Enable interrupt handler failed\n", 1906 "%d:0451 Enable interrupt handler failed\n",
1707 phba->brd_no); 1907 phba->brd_no);
1708 goto out_kthread_stop; 1908 goto out_disable_msi;
1709 } 1909 }
1710 1910
1711 error = scsi_add_host(host, &pdev->dev); 1911 phba->MBslimaddr = phba->slim_memmap_p;
1712 if (error) 1912 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
1913 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
1914 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1915 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1916
1917 if (lpfc_alloc_sysfs_attr(vport))
1713 goto out_free_irq; 1918 goto out_free_irq;
1714 1919
1715 scsi_scan_host(host); 1920 if (lpfc_sli_hba_setup(phba))
1921 goto out_remove_device;
1922
1923 /*
1924 * hba setup may have changed the hba_queue_depth so we need to adjust
1925 * the value of can_queue.
1926 */
1927 shost->can_queue = phba->cfg_hba_queue_depth - 10;
1928
1929 lpfc_host_attrib_init(shost);
1930
1931 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1932 spin_lock_irq(shost->host_lock);
1933 lpfc_poll_start_timer(phba);
1934 spin_unlock_irq(shost->host_lock);
1935 }
1936
1937 scsi_scan_host(shost);
1716 1938
1717 return 0; 1939 return 0;
1718 1940
1941out_remove_device:
1942 lpfc_free_sysfs_attr(vport);
1943 spin_lock_irq(shost->host_lock);
1944 vport->fc_flag |= FC_UNLOADING;
1945 spin_unlock_irq(shost->host_lock);
1719out_free_irq: 1946out_free_irq:
1720 lpfc_stop_timer(phba); 1947 lpfc_stop_phba_timers(phba);
1721 phba->work_hba_events = 0; 1948 phba->pport->work_port_events = 0;
1722 free_irq(phba->pcidev->irq, phba); 1949 free_irq(phba->pcidev->irq, phba);
1950out_disable_msi:
1723 pci_disable_msi(phba->pcidev); 1951 pci_disable_msi(phba->pcidev);
1952 destroy_port(vport);
1724out_kthread_stop: 1953out_kthread_stop:
1725 kthread_stop(phba->worker_thread); 1954 kthread_stop(phba->worker_thread);
1726out_free_iocbq: 1955out_free_iocbq:
1727 list_for_each_entry_safe(iocbq_entry, iocbq_next, 1956 list_for_each_entry_safe(iocbq_entry, iocbq_next,
1728 &phba->lpfc_iocb_list, list) { 1957 &phba->lpfc_iocb_list, list) {
1729 spin_lock_irq(phba->host->host_lock);
1730 kfree(iocbq_entry); 1958 kfree(iocbq_entry);
1731 phba->total_iocbq_bufs--; 1959 phba->total_iocbq_bufs--;
1732 spin_unlock_irq(phba->host->host_lock);
1733 } 1960 }
1734 lpfc_mem_free(phba); 1961 lpfc_mem_free(phba);
1962out_free_hbqslimp:
1963 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
1964 phba->hbqslimp.phys);
1735out_free_slim: 1965out_free_slim:
1736 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, 1966 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
1737 phba->slim2p_mapping); 1967 phba->slim2p_mapping);
@@ -1741,27 +1971,85 @@ out_iounmap_slim:
1741 iounmap(phba->slim_memmap_p); 1971 iounmap(phba->slim_memmap_p);
1742out_idr_remove: 1972out_idr_remove:
1743 idr_remove(&lpfc_hba_index, phba->brd_no); 1973 idr_remove(&lpfc_hba_index, phba->brd_no);
1744out_put_host: 1974out_free_phba:
1745 phba->host = NULL; 1975 kfree(phba);
1746 scsi_host_put(host);
1747out_release_regions: 1976out_release_regions:
1748 pci_release_regions(pdev); 1977 pci_release_regions(pdev);
1749out_disable_device: 1978out_disable_device:
1750 pci_disable_device(pdev); 1979 pci_disable_device(pdev);
1751out: 1980out:
1752 pci_set_drvdata(pdev, NULL); 1981 pci_set_drvdata(pdev, NULL);
1982 if (shost)
1983 scsi_host_put(shost);
1753 return error; 1984 return error;
1754} 1985}
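The reworked probe path keeps the usual goto-unwind shape: each new resource (hbqslimp DMA area, MSI, the vport, sysfs attributes) gets its own label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A stripped-down sketch of the idiom with hypothetical resource names, not the driver's:

	static int example_probe(void)
	{
		void *a, *b;
		int error = -ENODEV;

		a = acquire_a();		/* hypothetical resource #1 */
		if (!a)
			goto out;
		b = acquire_b();		/* hypothetical resource #2 */
		if (!b)
			goto out_release_a;

		return 0;			/* success: keep both */

	out_release_a:
		release_a(a);			/* undo in reverse order */
	out:
		return error;
	}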
1755 1986
1756static void __devexit 1987static void __devexit
1757lpfc_pci_remove_one(struct pci_dev *pdev) 1988lpfc_pci_remove_one(struct pci_dev *pdev)
1758{ 1989{
1759 struct Scsi_Host *host = pci_get_drvdata(pdev); 1990 struct Scsi_Host *shost = pci_get_drvdata(pdev);
1760 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata; 1991 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1992 struct lpfc_hba *phba = vport->phba;
1993 struct lpfc_vport *port_iterator;
1994 list_for_each_entry(port_iterator, &phba->port_list, listentry)
1995 port_iterator->load_flag |= FC_UNLOADING;
1996
1997 kfree(vport->vname);
1998 lpfc_free_sysfs_attr(vport);
1999
2000 fc_remove_host(shost);
2001 scsi_remove_host(shost);
2002
2003 /*
2004 * Bring down the SLI Layer. This step disables all interrupts,
2005 * clears the rings, discards all mailbox commands, and resets
2006 * the HBA.
2007 */
2008 lpfc_sli_hba_down(phba);
2009 lpfc_sli_brdrestart(phba);
2010
2011 lpfc_stop_phba_timers(phba);
2012 spin_lock_irq(&phba->hbalock);
2013 list_del_init(&vport->listentry);
2014 spin_unlock_irq(&phba->hbalock);
2015
1761 2016
1762 lpfc_remove_device(phba); 2017 lpfc_debugfs_terminate(vport);
2018 lpfc_cleanup(vport);
2019
2020 kthread_stop(phba->worker_thread);
2021
2022 /* Release the irq reservation */
2023 free_irq(phba->pcidev->irq, phba);
2024 pci_disable_msi(phba->pcidev);
1763 2025
1764 pci_set_drvdata(pdev, NULL); 2026 pci_set_drvdata(pdev, NULL);
2027 scsi_host_put(shost);
2028
2029 /*
2030 * Call scsi_free before mem_free since scsi bufs are released to their
2031 * corresponding pools here.
2032 */
2033 lpfc_scsi_free(phba);
2034 lpfc_mem_free(phba);
2035
2036 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
2037 phba->hbqslimp.phys);
2038
2039 /* Free resources associated with SLI2 interface */
2040 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
2041 phba->slim2p, phba->slim2p_mapping);
2042
2043 /* unmap adapter SLIM and Control Registers */
2044 iounmap(phba->ctrl_regs_memmap_p);
2045 iounmap(phba->slim_memmap_p);
2046
2047 idr_remove(&lpfc_hba_index, phba->brd_no);
2048
2049 kfree(phba);
2050
2051 pci_release_regions(pdev);
2052 pci_disable_device(pdev);
1765} 2053}
1766 2054
1767/** 2055/**
@@ -1819,10 +2107,13 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
1819 pci_set_master(pdev); 2107 pci_set_master(pdev);
1820 2108
1821 /* Re-establishing Link */ 2109 /* Re-establishing Link */
1822 spin_lock_irq(phba->host->host_lock); 2110 spin_lock_irq(host->host_lock);
1823 phba->fc_flag |= FC_ESTABLISH_LINK; 2111 phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2112 spin_unlock_irq(host->host_lock);
2113
2114 spin_lock_irq(&phba->hbalock);
1824 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2115 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1825 spin_unlock_irq(phba->host->host_lock); 2116 spin_unlock_irq(&phba->hbalock);
1826 2117
1827 2118
1828 /* Take device offline; this will perform cleanup */ 2119 /* Take device offline; this will perform cleanup */
@@ -1932,7 +2223,7 @@ static struct pci_driver lpfc_driver = {
1932 .id_table = lpfc_id_table, 2223 .id_table = lpfc_id_table,
1933 .probe = lpfc_pci_probe_one, 2224 .probe = lpfc_pci_probe_one,
1934 .remove = __devexit_p(lpfc_pci_remove_one), 2225 .remove = __devexit_p(lpfc_pci_remove_one),
1935 .err_handler = &lpfc_err_handler, 2226 .err_handler = &lpfc_err_handler,
1936}; 2227};
1937 2228
1938static int __init 2229static int __init
@@ -1945,11 +2236,15 @@ lpfc_init(void)
1945 2236
1946 lpfc_transport_template = 2237 lpfc_transport_template =
1947 fc_attach_transport(&lpfc_transport_functions); 2238 fc_attach_transport(&lpfc_transport_functions);
1948 if (!lpfc_transport_template) 2239 lpfc_vport_transport_template =
2240 fc_attach_transport(&lpfc_vport_transport_functions);
2241 if (!lpfc_transport_template || !lpfc_vport_transport_template)
1949 return -ENOMEM; 2242 return -ENOMEM;
1950 error = pci_register_driver(&lpfc_driver); 2243 error = pci_register_driver(&lpfc_driver);
1951 if (error) 2244 if (error) {
1952 fc_release_transport(lpfc_transport_template); 2245 fc_release_transport(lpfc_transport_template);
2246 fc_release_transport(lpfc_vport_transport_template);
2247 }
1953 2248
1954 return error; 2249 return error;
1955} 2250}
@@ -1959,6 +2254,7 @@ lpfc_exit(void)
1959{ 2254{
1960 pci_unregister_driver(&lpfc_driver); 2255 pci_unregister_driver(&lpfc_driver);
1961 fc_release_transport(lpfc_transport_template); 2256 fc_release_transport(lpfc_transport_template);
2257 fc_release_transport(lpfc_vport_transport_template);
1962} 2258}
1963 2259
1964module_init(lpfc_init); 2260module_init(lpfc_init);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 438cbcd9eb13..8a6ceffeabcf 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -30,6 +30,7 @@
30#define LOG_SLI 0x800 /* SLI events */ 30#define LOG_SLI 0x800 /* SLI events */
31#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 31#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
32#define LOG_LIBDFC 0x2000 /* Libdfc events */ 32#define LOG_LIBDFC 0x2000 /* Libdfc events */
33#define LOG_VPORT 0x4000 /* NPIV events */
33#define LOG_ALL_MSG 0xffff /* LOG all messages */ 34#define LOG_ALL_MSG 0xffff /* LOG all messages */
34 35
35#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 36#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 8041c3f06f7b..8f42fbfdd29e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -82,6 +82,22 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
82} 82}
83 83
84/**********************************************/ 84/**********************************************/
85/* lpfc_heart_beat Issue a HEART_BEAT */
86/* mailbox command */
87/**********************************************/
88void
89lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
90{
91 MAILBOX_t *mb;
92
93 mb = &pmb->mb;
94 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
95 mb->mbxCommand = MBX_HEARTBEAT;
96 mb->mbxOwner = OWN_HOST;
97 return;
98}
99
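A minimal sketch of how the new HEARTBEAT mailbox might be issued from process context (for example by the worker thread after hb_tmofunc fires). lpfc_heart_beat() is the helper above; the completion handler is hypothetical, and the submit path assumes the driver's existing mbox_mem_pool and lpfc_sli_issue_mbox()/MBX_NOWAIT interface:

	static void my_hb_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
	{
		phba->hb_outstanding = 0;		/* heartbeat answered */
		mempool_free(pmboxq, phba->mbox_mem_pool);
	}

	static void example_issue_heartbeat(struct lpfc_hba *phba)
	{
		LPFC_MBOXQ_t *pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

		if (!pmboxq)
			return;
		lpfc_heart_beat(phba, pmboxq);		/* fill in MBX_HEARTBEAT */
		pmboxq->mbox_cmpl = my_hb_cmpl;		/* hypothetical completion */
		if (lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT) == MBX_NOT_FINISHED) {
			mempool_free(pmboxq, phba->mbox_mem_pool);
			return;
		}
		phba->hb_outstanding = 1;
	}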
100/**********************************************/
85/* lpfc_read_la Issue a READ LA */ 101/* lpfc_read_la Issue a READ LA */
86/* mailbox command */ 102/* mailbox command */
87/**********************************************/ 103/**********************************************/
@@ -134,6 +150,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
134void 150void
135lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 151lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
136{ 152{
153 struct lpfc_vport *vport = phba->pport;
137 MAILBOX_t *mb = &pmb->mb; 154 MAILBOX_t *mb = &pmb->mb;
138 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 155 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
139 156
@@ -147,7 +164,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
147 mb->un.varCfgLnk.cr_count = phba->cfg_cr_count; 164 mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
148 } 165 }
149 166
150 mb->un.varCfgLnk.myId = phba->fc_myDID; 167 mb->un.varCfgLnk.myId = vport->fc_myDID;
151 mb->un.varCfgLnk.edtov = phba->fc_edtov; 168 mb->un.varCfgLnk.edtov = phba->fc_edtov;
152 mb->un.varCfgLnk.arbtov = phba->fc_arbtov; 169 mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
153 mb->un.varCfgLnk.ratov = phba->fc_ratov; 170 mb->un.varCfgLnk.ratov = phba->fc_ratov;
@@ -239,7 +256,7 @@ lpfc_init_link(struct lpfc_hba * phba,
239/* mailbox command */ 256/* mailbox command */
240/**********************************************/ 257/**********************************************/
241int 258int
242lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 259lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
243{ 260{
244 struct lpfc_dmabuf *mp; 261 struct lpfc_dmabuf *mp;
245 MAILBOX_t *mb; 262 MAILBOX_t *mb;
@@ -270,6 +287,7 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
270 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 287 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
271 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 288 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
272 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 289 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
290 mb->un.varRdSparm.vpi = vpi;
273 291
274 /* save address for completion */ 292 /* save address for completion */
275 pmb->context1 = mp; 293 pmb->context1 = mp;
@@ -282,7 +300,8 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
282/* mailbox command */ 300/* mailbox command */
283/********************************************/ 301/********************************************/
284void 302void
285lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb) 303lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
304 LPFC_MBOXQ_t * pmb)
286{ 305{
287 MAILBOX_t *mb; 306 MAILBOX_t *mb;
288 307
@@ -290,6 +309,7 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
290 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 309 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
291 310
292 mb->un.varUnregDID.did = did; 311 mb->un.varUnregDID.did = did;
312 mb->un.varUnregDID.vpi = vpi;
293 313
294 mb->mbxCommand = MBX_UNREG_D_ID; 314 mb->mbxCommand = MBX_UNREG_D_ID;
295 mb->mbxOwner = OWN_HOST; 315 mb->mbxOwner = OWN_HOST;
@@ -335,19 +355,17 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
335/* mailbox command */ 355/* mailbox command */
336/********************************************/ 356/********************************************/
337int 357int
338lpfc_reg_login(struct lpfc_hba * phba, 358lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
339 uint32_t did, uint8_t * param, LPFC_MBOXQ_t * pmb, uint32_t flag) 359 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
340{ 360{
361 MAILBOX_t *mb = &pmb->mb;
341 uint8_t *sparam; 362 uint8_t *sparam;
342 struct lpfc_dmabuf *mp; 363 struct lpfc_dmabuf *mp;
343 MAILBOX_t *mb;
344 struct lpfc_sli *psli;
345 364
346 psli = &phba->sli;
347 mb = &pmb->mb;
348 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 365 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
349 366
350 mb->un.varRegLogin.rpi = 0; 367 mb->un.varRegLogin.rpi = 0;
368 mb->un.varRegLogin.vpi = vpi;
351 mb->un.varRegLogin.did = did; 369 mb->un.varRegLogin.did = did;
352 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ 370 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
353 371
@@ -359,12 +377,10 @@ lpfc_reg_login(struct lpfc_hba * phba,
359 kfree(mp); 377 kfree(mp);
360 mb->mbxCommand = MBX_REG_LOGIN64; 378 mb->mbxCommand = MBX_REG_LOGIN64;
361 /* REG_LOGIN: no buffers */ 379 /* REG_LOGIN: no buffers */
362 lpfc_printf_log(phba, 380 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
363 KERN_WARNING, 381 "%d (%d):0302 REG_LOGIN: no buffers, DID x%x, "
364 LOG_MBOX, 382 "flag x%x\n",
365 "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n", 383 phba->brd_no, vpi, did, flag);
366 phba->brd_no,
367 (uint32_t) did, (uint32_t) flag);
368 return (1); 384 return (1);
369 } 385 }
370 INIT_LIST_HEAD(&mp->list); 386 INIT_LIST_HEAD(&mp->list);
@@ -389,7 +405,8 @@ lpfc_reg_login(struct lpfc_hba * phba,
389/* mailbox command */ 405/* mailbox command */
390/**********************************************/ 406/**********************************************/
391void 407void
392lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb) 408lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
409 LPFC_MBOXQ_t * pmb)
393{ 410{
394 MAILBOX_t *mb; 411 MAILBOX_t *mb;
395 412
@@ -398,12 +415,52 @@ lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
398 415
399 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 416 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
400 mb->un.varUnregLogin.rsvd1 = 0; 417 mb->un.varUnregLogin.rsvd1 = 0;
418 mb->un.varUnregLogin.vpi = vpi;
401 419
402 mb->mbxCommand = MBX_UNREG_LOGIN; 420 mb->mbxCommand = MBX_UNREG_LOGIN;
403 mb->mbxOwner = OWN_HOST; 421 mb->mbxOwner = OWN_HOST;
404 return; 422 return;
405} 423}
406 424
425/**************************************************/
426/* lpfc_reg_vpi Issue a REG_VPI */
427/* mailbox command */
428/**************************************************/
429void
430lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
431 LPFC_MBOXQ_t *pmb)
432{
433 MAILBOX_t *mb = &pmb->mb;
434
435 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
436
437 mb->un.varRegVpi.vpi = vpi;
438 mb->un.varRegVpi.sid = sid;
439
440 mb->mbxCommand = MBX_REG_VPI;
441 mb->mbxOwner = OWN_HOST;
442 return;
443
444}
445
446/**************************************************/
447/* lpfc_unreg_vpi Issue a UNREG_VPI */
448/* mailbox command */
449/**************************************************/
450void
451lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
452{
453 MAILBOX_t *mb = &pmb->mb;
454 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
455
456 mb->un.varUnregVpi.vpi = vpi;
457
458 mb->mbxCommand = MBX_UNREG_VPI;
459 mb->mbxOwner = OWN_HOST;
460 return;
461
462}
463
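The REG_VPI/UNREG_VPI builders above only fill in the mailbox; the caller still allocates and submits it. A sketch of the REG_VPI side, assuming the vport already carries its assigned vpi and fabric address (vport->vpi, vport->fc_myDID) and reusing the existing lpfc_sli_issue_mbox() path; the completion handler name is hypothetical:

	static int example_issue_reg_vpi(struct lpfc_hba *phba,
					 struct lpfc_vport *vport)
	{
		LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

		if (!mbox)
			return -ENOMEM;
		lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
		mbox->mbox_cmpl = my_reg_vpi_cmpl;	/* hypothetical completion */
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			return -EIO;
		}
		return 0;
	}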
407static void 464static void
408lpfc_config_pcb_setup(struct lpfc_hba * phba) 465lpfc_config_pcb_setup(struct lpfc_hba * phba)
409{ 466{
@@ -412,14 +469,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
412 PCB_t *pcbp = &phba->slim2p->pcb; 469 PCB_t *pcbp = &phba->slim2p->pcb;
413 dma_addr_t pdma_addr; 470 dma_addr_t pdma_addr;
414 uint32_t offset; 471 uint32_t offset;
415 uint32_t iocbCnt; 472 uint32_t iocbCnt = 0;
416 int i; 473 int i;
417 474
418 pcbp->maxRing = (psli->num_rings - 1); 475 pcbp->maxRing = (psli->num_rings - 1);
419 476
420 iocbCnt = 0;
421 for (i = 0; i < psli->num_rings; i++) { 477 for (i = 0; i < psli->num_rings; i++) {
422 pring = &psli->ring[i]; 478 pring = &psli->ring[i];
479
480 pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
481 SLI2_IOCB_CMD_SIZE;
482 pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
483 SLI2_IOCB_RSP_SIZE;
423 /* A ring MUST have both cmd and rsp entries defined to be 484 /* A ring MUST have both cmd and rsp entries defined to be
424 valid */ 485 valid */
425 if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) { 486 if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
@@ -434,20 +495,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
434 continue; 495 continue;
435 } 496 }
436 /* Command ring setup for ring */ 497 /* Command ring setup for ring */
437 pring->cmdringaddr = 498 pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
438 (void *)&phba->slim2p->IOCBs[iocbCnt];
439 pcbp->rdsc[i].cmdEntries = pring->numCiocb; 499 pcbp->rdsc[i].cmdEntries = pring->numCiocb;
440 500
441 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] - 501 offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] -
442 (uint8_t *)phba->slim2p; 502 (uint8_t *) phba->slim2p;
443 pdma_addr = phba->slim2p_mapping + offset; 503 pdma_addr = phba->slim2p_mapping + offset;
444 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); 504 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
445 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); 505 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
446 iocbCnt += pring->numCiocb; 506 iocbCnt += pring->numCiocb;
447 507
448 /* Response ring setup for ring */ 508 /* Response ring setup for ring */
449 pring->rspringaddr = 509 pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
450 (void *)&phba->slim2p->IOCBs[iocbCnt];
451 510
452 pcbp->rdsc[i].rspEntries = pring->numRiocb; 511 pcbp->rdsc[i].rspEntries = pring->numRiocb;
453 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] - 512 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
@@ -462,16 +521,108 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
462void 521void
463lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 522lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
464{ 523{
465 MAILBOX_t *mb; 524 MAILBOX_t *mb = &pmb->mb;
466
467 mb = &pmb->mb;
468 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 525 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
469 mb->un.varRdRev.cv = 1; 526 mb->un.varRdRev.cv = 1;
527 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
470 mb->mbxCommand = MBX_READ_REV; 528 mb->mbxCommand = MBX_READ_REV;
471 mb->mbxOwner = OWN_HOST; 529 mb->mbxOwner = OWN_HOST;
472 return; 530 return;
473} 531}
474 532
533static void
534lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
535 struct lpfc_hbq_init *hbq_desc)
536{
537 hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
538 hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
539 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
540}
541
542static void
543lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
544 struct lpfc_hbq_init *hbq_desc)
545{
546 hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
547 hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
548 hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
549 hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
550 memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
551 sizeof(hbqmb->profiles.profile3.cmdmatch));
552}
553
554static void
555lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
556 struct lpfc_hbq_init *hbq_desc)
557{
558 hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
559 hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
560 hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
561 hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
562 memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
563 sizeof(hbqmb->profiles.profile5.cmdmatch));
564}
565
566void
567lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
568 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
569{
570 int i;
571 MAILBOX_t *mb = &pmb->mb;
572 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
573
574 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
575 hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */
576 hbqmb->recvNotify = hbq_desc->rn; /* Receive
577 * Notification */
578 hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks
579 * # in words 0-19 */
580 hbqmb->profile = hbq_desc->profile; /* Selection profile:
581 * 0 = all,
582 * 7 = logentry */
583 hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring
584 * e.g. Ring0=b0001,
585 * ring2=b0100 */
586 hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4
587 * or 5 */
588 hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this
589 * HBQ will be used
590 * for LogEntry
591 * buffers */
592 hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
593 hbq_entry_index * sizeof(struct lpfc_hbq_entry);
594 hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
595
596 mb->mbxCommand = MBX_CONFIG_HBQ;
597 mb->mbxOwner = OWN_HOST;
598
599 /* Copy info for profiles 2,3,5. For other
600 * profiles this area is reserved
601 */
602 if (hbq_desc->profile == 2)
603 lpfc_build_hbq_profile2(hbqmb, hbq_desc);
604 else if (hbq_desc->profile == 3)
605 lpfc_build_hbq_profile3(hbqmb, hbq_desc);
606 else if (hbq_desc->profile == 5)
607 lpfc_build_hbq_profile5(hbqmb, hbq_desc);
608
609 /* Return if no rctl / type masks for this HBQ */
610 if (!hbq_desc->mask_count)
611 return;
612
613 /* Otherwise we setup specific rctl / type masks for this HBQ */
614 for (i = 0; i < hbq_desc->mask_count; i++) {
615 hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
616 hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
617 hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
618 hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
619 }
620
621 return;
622}
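lpfc_config_hbq() above is driven entirely by a struct lpfc_hbq_init descriptor. An illustrative descriptor with made-up values — one R_CTL/TYPE mask, profile 0 ("match all"), bound to the ELS ring — just to show which fields feed the mailbox; only the field names come from the code above:

	static struct lpfc_hbq_init example_hbq = {
		.rn		= 1,			/* receive notification */
		.entry_count	= 120,			/* illustrative HBQ depth */
		.profile	= 0,			/* 0 = all frames */
		.ring_mask	= (1 << LPFC_ELS_RING),	/* bind to ring 0 */
		.mask_count	= 1,
		.hbqMasks	= { { .tmatch = 0x01, .tmask = 0xff,
				      .rctlmatch = 0x22, .rctlmask = 0xff } },
	};

	/* ...then: lpfc_config_hbq(phba, &example_hbq, hbq_entry_index, pmb); */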
623
624
625
475void 626void
476lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 627lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
477{ 628{
@@ -514,15 +665,16 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
514} 665}
515 666
516void 667void
517lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 668lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
518{ 669{
670 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
519 MAILBOX_t *mb = &pmb->mb; 671 MAILBOX_t *mb = &pmb->mb;
520 dma_addr_t pdma_addr; 672 dma_addr_t pdma_addr;
521 uint32_t bar_low, bar_high; 673 uint32_t bar_low, bar_high;
522 size_t offset; 674 size_t offset;
523 struct lpfc_hgp hgp; 675 struct lpfc_hgp hgp;
524 void __iomem *to_slim;
525 int i; 676 int i;
677 uint32_t pgp_offset;
526 678
527 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 679 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
528 mb->mbxCommand = MBX_CONFIG_PORT; 680 mb->mbxCommand = MBX_CONFIG_PORT;
@@ -535,12 +687,29 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
535 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); 687 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
536 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); 688 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
537 689
690 /* If HBA supports SLI=3 ask for it */
691
692 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
693 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
694 mb->un.varCfgPort.max_hbq = 1; /* Requesting 2 HBQs */
695 if (phba->max_vpi && phba->cfg_npiv_enable &&
696 phba->vpd.sli3Feat.cmv) {
697 mb->un.varCfgPort.max_vpi = phba->max_vpi;
698 mb->un.varCfgPort.cmv = 1;
699 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
700 } else
701 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
702 } else
703 phba->sli_rev = 2;
704 mb->un.varCfgPort.sli_mode = phba->sli_rev;
705
538 /* Now setup pcb */ 706 /* Now setup pcb */
539 phba->slim2p->pcb.type = TYPE_NATIVE_SLI2; 707 phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
540 phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2; 708 phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
541 709
542 /* Setup Mailbox pointers */ 710 /* Setup Mailbox pointers */
543 phba->slim2p->pcb.mailBoxSize = sizeof(MAILBOX_t); 711 phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) +
712 sizeof(struct sli2_desc);
544 offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p; 713 offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
545 pdma_addr = phba->slim2p_mapping + offset; 714 pdma_addr = phba->slim2p_mapping + offset;
546 phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr); 715 phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
@@ -568,29 +737,70 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
568 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low); 737 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
569 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high); 738 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
570 739
740 /*
741 * Set up HGP - Port Memory
742 *
743 * The port expects the host get/put pointers to reside in memory
744 * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
745 * area of SLIM. In SLI-2 mode, there's an additional 16 reserved
746 * words (0x40 bytes). This area is not reserved if HBQs are
747 * configured in SLI-3.
748 *
749 * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
750 * RR0Get 0xc4 0x84
751 * CR1Put 0xc8 0x88
752 * RR1Get 0xcc 0x8c
753 * CR2Put 0xd0 0x90
754 * RR2Get 0xd4 0x94
755 * CR3Put 0xd8 0x98
756 * RR3Get 0xdc 0x9c
757 *
758 * Reserved 0xa0-0xbf
759 * If HBQs configured:
760 * HBQ 0 Put ptr 0xc0
761 * HBQ 1 Put ptr 0xc4
762 * HBQ 2 Put ptr 0xc8
763 * ......
764 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
765 *
766 */
767
768 if (phba->sli_rev == 3) {
769 phba->host_gp = &mb_slim->us.s3.host[0];
770 phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
771 } else {
772 phba->host_gp = &mb_slim->us.s2.host[0];
773 phba->hbq_put = NULL;
774 }
571 775
572 /* mask off BAR0's flag bits 0 - 3 */ 776 /* mask off BAR0's flag bits 0 - 3 */
573 phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + 777 phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
574 (SLIMOFF*sizeof(uint32_t)); 778 (void __iomem *) phba->host_gp -
779 (void __iomem *)phba->MBslimaddr;
575 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) 780 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
576 phba->slim2p->pcb.hgpAddrHigh = bar_high; 781 phba->slim2p->pcb.hgpAddrHigh = bar_high;
577 else 782 else
578 phba->slim2p->pcb.hgpAddrHigh = 0; 783 phba->slim2p->pcb.hgpAddrHigh = 0;
579 /* write HGP data to SLIM at the required longword offset */ 784 /* write HGP data to SLIM at the required longword offset */
580 memset(&hgp, 0, sizeof(struct lpfc_hgp)); 785 memset(&hgp, 0, sizeof(struct lpfc_hgp));
581 to_slim = phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t));
582 786
583 for (i=0; i < phba->sli.num_rings; i++) { 787 for (i=0; i < phba->sli.num_rings; i++) {
584 lpfc_memcpy_to_slim(to_slim, &hgp, sizeof(struct lpfc_hgp)); 788 lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
585 to_slim += sizeof (struct lpfc_hgp); 789 sizeof(*phba->host_gp));
586 } 790 }
587 791
588 /* Setup Port Group ring pointer */ 792 /* Setup Port Group ring pointer */
589 offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port - 793 if (phba->sli_rev == 3)
590 (uint8_t *)phba->slim2p; 794 pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s3_pgp.port -
591 pdma_addr = phba->slim2p_mapping + offset; 795 (uint8_t *)phba->slim2p;
796 else
797 pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
798 (uint8_t *)phba->slim2p;
799
800 pdma_addr = phba->slim2p_mapping + pgp_offset;
592 phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr); 801 phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
593 phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr); 802 phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
803 phba->hbq_get = &phba->slim2p->mbx.us.s3_pgp.hbq_get[0];
594 804
595 /* Use callback routine to setp rings in the pcb */ 805 /* Use callback routine to setp rings in the pcb */
596 lpfc_config_pcb_setup(phba); 806 lpfc_config_pcb_setup(phba);
@@ -606,11 +816,7 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
606 816
607 /* Swap PCB if needed */ 817 /* Swap PCB if needed */
608 lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb, 818 lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
609 sizeof (PCB_t)); 819 sizeof(PCB_t));
610
611 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
612 "%d:0405 Service Level Interface (SLI) 2 selected\n",
613 phba->brd_no);
614} 820}
615 821
616void 822void
@@ -644,15 +850,23 @@ lpfc_mbox_get(struct lpfc_hba * phba)
644 LPFC_MBOXQ_t *mbq = NULL; 850 LPFC_MBOXQ_t *mbq = NULL;
645 struct lpfc_sli *psli = &phba->sli; 851 struct lpfc_sli *psli = &phba->sli;
646 852
647 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, 853 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
648 list); 854 if (mbq)
649 if (mbq) {
650 psli->mboxq_cnt--; 855 psli->mboxq_cnt--;
651 }
652 856
653 return mbq; 857 return mbq;
654} 858}
655 859
860void
861lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
862{
863 /* This function expects to be called from interrupt context */
864 spin_lock(&phba->hbalock);
865 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
866 spin_unlock(&phba->hbalock);
867 return;
868}
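lpfc_mbox_get() above relies on the driver's list_remove_head() macro; an equivalent open-coding with the standard list helpers (this is what the macro amounts to, not its literal definition) looks like:

	LPFC_MBOXQ_t *mbq = NULL;

	if (!list_empty(&psli->mboxq)) {
		mbq = list_entry(psli->mboxq.next, LPFC_MBOXQ_t, list);
		list_del_init(&mbq->list);
	}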
869
656int 870int
657lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) 871lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
658{ 872{
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ec3bbbde6f7a..3594c469494f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2005 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -38,10 +38,13 @@
38#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ 38#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ 39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
40 40
41
42
41int 43int
42lpfc_mem_alloc(struct lpfc_hba * phba) 44lpfc_mem_alloc(struct lpfc_hba * phba)
43{ 45{
44 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 46 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
47 int longs;
45 int i; 48 int i;
46 49
47 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", 50 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
@@ -80,10 +83,27 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
80 if (!phba->nlp_mem_pool) 83 if (!phba->nlp_mem_pool)
81 goto fail_free_mbox_pool; 84 goto fail_free_mbox_pool;
82 85
86 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev,
87 LPFC_BPL_SIZE, 8, 0);
88 if (!phba->lpfc_hbq_pool)
89 goto fail_free_nlp_mem_pool;
90
91 /* vpi zero is reserved for the physical port so add 1 to max */
92 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
93 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
94 if (!phba->vpi_bmask)
95 goto fail_free_hbq_pool;
96
83 return 0; 97 return 0;
84 98
99 fail_free_hbq_pool:
100 lpfc_sli_hbqbuf_free_all(phba);
101 fail_free_nlp_mem_pool:
102 mempool_destroy(phba->nlp_mem_pool);
103 phba->nlp_mem_pool = NULL;
85 fail_free_mbox_pool: 104 fail_free_mbox_pool:
86 mempool_destroy(phba->mbox_mem_pool); 105 mempool_destroy(phba->mbox_mem_pool);
106 phba->mbox_mem_pool = NULL;
87 fail_free_mbuf_pool: 107 fail_free_mbuf_pool:
88 while (i--) 108 while (i--)
89 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 109 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
@@ -91,8 +111,10 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
91 kfree(pool->elements); 111 kfree(pool->elements);
92 fail_free_lpfc_mbuf_pool: 112 fail_free_lpfc_mbuf_pool:
93 pci_pool_destroy(phba->lpfc_mbuf_pool); 113 pci_pool_destroy(phba->lpfc_mbuf_pool);
114 phba->lpfc_mbuf_pool = NULL;
94 fail_free_dma_buf_pool: 115 fail_free_dma_buf_pool:
95 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); 116 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
117 phba->lpfc_scsi_dma_buf_pool = NULL;
96 fail: 118 fail:
97 return -ENOMEM; 119 return -ENOMEM;
98} 120}
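The vpi_bmask allocation added to lpfc_mem_alloc() above sizes the bitmap by rounding (max_vpi + 1) bits up to whole longs, with VPI 0 reserved for the physical port. A minimal userspace sketch of that arithmetic and of a find-first-zero-bit style VPI allocator built on such a bitmap; the values and helpers below are illustrative only, not driver API:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

int main(void)
{
	unsigned int max_vpi = 100;	/* example value, not a real limit */
	/* vpi 0 is reserved for the physical port, so size for max_vpi + 1
	 * bits, rounded up to whole longs (same formula as the patch). */
	size_t longs = ((max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long *vpi_bmask = calloc(longs, sizeof(unsigned long));
	unsigned int vpi;

	if (!vpi_bmask)
		return 1;

	vpi_bmask[0] |= 1UL;		/* mark vpi 0 as taken */

	/* find-first-zero-bit style scan for the next free virtual port id */
	for (vpi = 1; vpi <= max_vpi; vpi++) {
		unsigned long bit = 1UL << (vpi % BITS_PER_LONG);
		if (!(vpi_bmask[vpi / BITS_PER_LONG] & bit)) {
			vpi_bmask[vpi / BITS_PER_LONG] |= bit;
			printf("allocated vpi %u from a %zu-long bitmap\n",
			       vpi, longs);
			break;
		}
	}
	free(vpi_bmask);
	return 0;
}

With max_vpi = 100 and 64-bit longs the formula yields two longs (128 bits), the smallest whole-long bitmap that covers VPIs 0 through 100.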
@@ -106,6 +128,9 @@ lpfc_mem_free(struct lpfc_hba * phba)
106 struct lpfc_dmabuf *mp; 128 struct lpfc_dmabuf *mp;
107 int i; 129 int i;
108 130
131 kfree(phba->vpi_bmask);
132 lpfc_sli_hbqbuf_free_all(phba);
133
109 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { 134 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
110 mp = (struct lpfc_dmabuf *) (mbox->context1); 135 mp = (struct lpfc_dmabuf *) (mbox->context1);
111 if (mp) { 136 if (mp) {
@@ -115,6 +140,15 @@ lpfc_mem_free(struct lpfc_hba * phba)
115 list_del(&mbox->list); 140 list_del(&mbox->list);
116 mempool_free(mbox, phba->mbox_mem_pool); 141 mempool_free(mbox, phba->mbox_mem_pool);
117 } 142 }
143 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
144 mp = (struct lpfc_dmabuf *) (mbox->context1);
145 if (mp) {
146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
147 kfree(mp);
148 }
149 list_del(&mbox->list);
150 mempool_free(mbox, phba->mbox_mem_pool);
151 }
118 152
119 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 153 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
120 if (psli->mbox_active) { 154 if (psli->mbox_active) {
@@ -132,13 +166,21 @@ lpfc_mem_free(struct lpfc_hba * phba)
132 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 166 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
133 pool->elements[i].phys); 167 pool->elements[i].phys);
134 kfree(pool->elements); 168 kfree(pool->elements);
169
170 pci_pool_destroy(phba->lpfc_hbq_pool);
135 mempool_destroy(phba->nlp_mem_pool); 171 mempool_destroy(phba->nlp_mem_pool);
136 mempool_destroy(phba->mbox_mem_pool); 172 mempool_destroy(phba->mbox_mem_pool);
137 173
138 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); 174 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
139 pci_pool_destroy(phba->lpfc_mbuf_pool); 175 pci_pool_destroy(phba->lpfc_mbuf_pool);
140 176
141 /* Free the iocb lookup array */ 177 phba->lpfc_hbq_pool = NULL;
178 phba->nlp_mem_pool = NULL;
179 phba->mbox_mem_pool = NULL;
180 phba->lpfc_scsi_dma_buf_pool = NULL;
181 phba->lpfc_mbuf_pool = NULL;
182
183 /* Free the iocb lookup array */
142 kfree(psli->iocbq_lookup); 184 kfree(psli->iocbq_lookup);
143 psli->iocbq_lookup = NULL; 185 psli->iocbq_lookup = NULL;
144 186
@@ -148,20 +190,23 @@ void *
148lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) 190lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
149{ 191{
150 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 192 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
193 unsigned long iflags;
151 void *ret; 194 void *ret;
152 195
153 ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); 196 ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
154 197
155 if (!ret && ( mem_flags & MEM_PRI) && pool->current_count) { 198 spin_lock_irqsave(&phba->hbalock, iflags);
199 if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
156 pool->current_count--; 200 pool->current_count--;
157 ret = pool->elements[pool->current_count].virt; 201 ret = pool->elements[pool->current_count].virt;
158 *handle = pool->elements[pool->current_count].phys; 202 *handle = pool->elements[pool->current_count].phys;
159 } 203 }
204 spin_unlock_irqrestore(&phba->hbalock, iflags);
160 return ret; 205 return ret;
161} 206}
162 207
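lpfc_mbuf_alloc() now wraps the MEM_PRI fallback in hbalock so the safety-pool bookkeeping cannot race with the new __lpfc_mbuf_free() path that refills it. A rough userspace sketch of the same pattern, with malloc standing in for pci_pool_alloc() and a pthread mutex standing in for hbalock; all names below are invented for illustration:

#include <pthread.h>
#include <stdlib.h>

#define MEM_PRI		 0x1	/* caller may dip into the safety pool */
#define SAFETY_POOL_SIZE 64

static struct {
	void *elements[SAFETY_POOL_SIZE];
	int current_count;
} pool;					/* stand-in for lpfc_mbuf_safety_pool */

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for hbalock */

/* Normal allocation first; only a MEM_PRI caller may fall back to the
 * pre-allocated safety pool, and only while holding the lock. */
static void *mbuf_alloc(int mem_flags)
{
	void *ret = malloc(1024);	/* stand-in for pci_pool_alloc() */

	pthread_mutex_lock(&hba_lock);
	if (!ret && (mem_flags & MEM_PRI) && pool.current_count)
		ret = pool.elements[--pool.current_count];
	pthread_mutex_unlock(&hba_lock);
	return ret;
}

int main(void)
{
	void *p = mbuf_alloc(MEM_PRI);	/* pool is empty here, so this is a plain malloc */

	free(p);
	return 0;
}

The free side would normally refill elements[] rather than releasing the buffer, which is why both paths must share the same lock.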
163void 208void
164lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) 209__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
165{ 210{
166 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 211 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
167 212
@@ -174,3 +219,51 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
174 } 219 }
175 return; 220 return;
176} 221}
222
223void
224lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
225{
226 unsigned long iflags;
227
228 spin_lock_irqsave(&phba->hbalock, iflags);
229 __lpfc_mbuf_free(phba, virt, dma);
230 spin_unlock_irqrestore(&phba->hbalock, iflags);
231 return;
232}
233
234void *
235lpfc_hbq_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
236{
237 void *ret;
238 ret = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_ATOMIC, handle);
239 return ret;
240}
241
242void
243lpfc_hbq_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
244{
245 pci_pool_free(phba->lpfc_hbq_pool, virt, dma);
246 return;
247}
248
249void
250lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
251{
252 struct hbq_dmabuf *hbq_entry;
253
254 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
255 hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
256 if (hbq_entry->tag == -1) {
257 lpfc_hbq_free(phba, hbq_entry->dbuf.virt,
258 hbq_entry->dbuf.phys);
259 kfree(hbq_entry);
260 } else {
261 lpfc_sli_free_hbq(phba, hbq_entry);
262 }
263 } else {
264 lpfc_mbuf_free(phba, mp->virt, mp->phys);
265 kfree(mp);
266 }
267 return;
268}
269
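lpfc_in_buf_free() above dispatches on whether HBQs are enabled: an unposted HBQ buffer (tag == -1) is released directly, a posted one is recycled through the HBQ bookkeeping, and everything else takes the old mbuf path. A compact userspace sketch of that container_of dispatch, using a made-up hbq_buf layout and stand-in free routines rather than the driver's structures:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct dmabuf { void *virt; };
struct hbq_buf {			/* illustrative layout, not the driver's */
	struct dmabuf dbuf;
	int tag;			/* -1 means the buffer was never posted */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void in_buf_free(int hbq_enabled, struct dmabuf *mp)
{
	if (hbq_enabled) {
		struct hbq_buf *hbq = container_of(mp, struct hbq_buf, dbuf);

		if (hbq->tag == -1) {
			printf("releasing unposted hbq buffer\n");
			free(hbq);	/* straight back to the pool */
		} else {
			printf("recycling posted hbq buffer, tag %d\n", hbq->tag);
			free(hbq);	/* the driver reposts it instead */
		}
	} else {
		free(mp);		/* old SLI-2 style mbuf path */
	}
}

int main(void)
{
	struct hbq_buf *hbq = calloc(1, sizeof(*hbq));

	if (!hbq)
		return 1;
	hbq->tag = -1;
	in_buf_free(1, &hbq->dbuf);
	return 0;
}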
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b309841e3846..bca2f5c9b4ba 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,4 +1,4 @@
1/******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
@@ -35,20 +35,22 @@
35#include "lpfc.h" 35#include "lpfc.h"
36#include "lpfc_logmsg.h" 36#include "lpfc_logmsg.h"
37#include "lpfc_crtn.h" 37#include "lpfc_crtn.h"
38#include "lpfc_vport.h"
39#include "lpfc_debugfs.h"
38 40
39 41
40/* Called to verify a rcv'ed ADISC was intended for us. */ 42/* Called to verify a rcv'ed ADISC was intended for us. */
41static int 43static int
42lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 44lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
43 struct lpfc_name * nn, struct lpfc_name * pn) 45 struct lpfc_name *nn, struct lpfc_name *pn)
44{ 46{
45 /* Compare the ADISC rsp WWNN / WWPN matches our internal node 47 /* Compare the ADISC rsp WWNN / WWPN matches our internal node
46 * table entry for that node. 48 * table entry for that node.
47 */ 49 */
48 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0) 50 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
49 return 0; 51 return 0;
50 52
51 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0) 53 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
52 return 0; 54 return 0;
53 55
54 /* we match, return success */ 56 /* we match, return success */
@@ -56,11 +58,10 @@ lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
56} 58}
57 59
58int 60int
59lpfc_check_sparm(struct lpfc_hba * phba, 61lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
60 struct lpfc_nodelist * ndlp, struct serv_parm * sp, 62 struct serv_parm * sp, uint32_t class)
61 uint32_t class)
62{ 63{
63 volatile struct serv_parm *hsp = &phba->fc_sparam; 64 volatile struct serv_parm *hsp = &vport->fc_sparam;
64 uint16_t hsp_value, ssp_value = 0; 65 uint16_t hsp_value, ssp_value = 0;
65 66
66 /* 67 /*
@@ -75,12 +76,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
75 hsp->cls1.rcvDataSizeLsb; 76 hsp->cls1.rcvDataSizeLsb;
76 ssp_value = (sp->cls1.rcvDataSizeMsb << 8) | 77 ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
77 sp->cls1.rcvDataSizeLsb; 78 sp->cls1.rcvDataSizeLsb;
79 if (!ssp_value)
80 goto bad_service_param;
78 if (ssp_value > hsp_value) { 81 if (ssp_value > hsp_value) {
79 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; 82 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
80 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; 83 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
81 } 84 }
82 } else if (class == CLASS1) { 85 } else if (class == CLASS1) {
83 return 0; 86 goto bad_service_param;
84 } 87 }
85 88
86 if (sp->cls2.classValid) { 89 if (sp->cls2.classValid) {
@@ -88,12 +91,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
88 hsp->cls2.rcvDataSizeLsb; 91 hsp->cls2.rcvDataSizeLsb;
89 ssp_value = (sp->cls2.rcvDataSizeMsb << 8) | 92 ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
90 sp->cls2.rcvDataSizeLsb; 93 sp->cls2.rcvDataSizeLsb;
94 if (!ssp_value)
95 goto bad_service_param;
91 if (ssp_value > hsp_value) { 96 if (ssp_value > hsp_value) {
92 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; 97 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
93 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; 98 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
94 } 99 }
95 } else if (class == CLASS2) { 100 } else if (class == CLASS2) {
96 return 0; 101 goto bad_service_param;
97 } 102 }
98 103
99 if (sp->cls3.classValid) { 104 if (sp->cls3.classValid) {
@@ -101,12 +106,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
101 hsp->cls3.rcvDataSizeLsb; 106 hsp->cls3.rcvDataSizeLsb;
102 ssp_value = (sp->cls3.rcvDataSizeMsb << 8) | 107 ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
103 sp->cls3.rcvDataSizeLsb; 108 sp->cls3.rcvDataSizeLsb;
109 if (!ssp_value)
110 goto bad_service_param;
104 if (ssp_value > hsp_value) { 111 if (ssp_value > hsp_value) {
105 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; 112 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
106 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; 113 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
107 } 114 }
108 } else if (class == CLASS3) { 115 } else if (class == CLASS3) {
109 return 0; 116 goto bad_service_param;
110 } 117 }
111 118
112 /* 119 /*
@@ -125,12 +132,22 @@ lpfc_check_sparm(struct lpfc_hba * phba,
125 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); 132 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
126 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); 133 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
127 return 1; 134 return 1;
135bad_service_param:
136 lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
137 "%d (%d):0207 Device %x "
138 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
139 "invalid service parameters. Ignoring device.\n",
140 vport->phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
141 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
142 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
143 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
144 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
145 return 0;
128} 146}
129 147
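Each class block in lpfc_check_sparm() rebuilds the peer's 16-bit receive-data-field size from its MSB/LSB bytes, now treats a zero size as a bad service parameter, and clamps the value echoed back to our own limit. A standalone sketch of that per-class check; the helper and the example sizes are purely illustrative:

#include <stdio.h>
#include <stdint.h>

/* Returns the size we will echo back, or 0 for a bad service parameter. */
static uint16_t check_rcv_size(uint8_t our_msb, uint8_t our_lsb,
			       uint8_t *their_msb, uint8_t *their_lsb)
{
	uint16_t ours   = (uint16_t)(our_msb << 8)    | our_lsb;
	uint16_t theirs = (uint16_t)(*their_msb << 8) | *their_lsb;

	if (!theirs)
		return 0;		/* the new bad_service_param case */
	if (theirs > ours) {		/* clamp the reply to our own limit */
		*their_msb = our_msb;
		*their_lsb = our_lsb;
		theirs = ours;
	}
	return theirs;
}

int main(void)
{
	uint8_t msb = 0x10, lsb = 0x00;				/* peer claims 4096 bytes */
	uint16_t size = check_rcv_size(0x08, 0x00, &msb, &lsb);	/* our limit is 2048 */

	printf("negotiated receive size %u (0x%02x%02x)\n", size, msb, lsb);
	return 0;
}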
130static void * 148static void *
131lpfc_check_elscmpl_iocb(struct lpfc_hba * phba, 149lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
132 struct lpfc_iocbq *cmdiocb, 150 struct lpfc_iocbq *rspiocb)
133 struct lpfc_iocbq *rspiocb)
134{ 151{
135 struct lpfc_dmabuf *pcmd, *prsp; 152 struct lpfc_dmabuf *pcmd, *prsp;
136 uint32_t *lp; 153 uint32_t *lp;
@@ -168,32 +185,29 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
168 * routine effectively results in a "software abort". 185 * routine effectively results in a "software abort".
169 */ 186 */
170int 187int
171lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 188lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
172{ 189{
173 LIST_HEAD(completions); 190 LIST_HEAD(completions);
174 struct lpfc_sli *psli; 191 struct lpfc_sli *psli = &phba->sli;
175 struct lpfc_sli_ring *pring; 192 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
176 struct lpfc_iocbq *iocb, *next_iocb; 193 struct lpfc_iocbq *iocb, *next_iocb;
177 IOCB_t *cmd; 194 IOCB_t *cmd;
178 195
179 /* Abort outstanding I/O on NPort <nlp_DID> */ 196 /* Abort outstanding I/O on NPort <nlp_DID> */
180 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 197 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
181 "%d:0205 Abort outstanding I/O on NPort x%x " 198 "%d (%d):0205 Abort outstanding I/O on NPort x%x "
182 "Data: x%x x%x x%x\n", 199 "Data: x%x x%x x%x\n",
183 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 200 phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
184 ndlp->nlp_state, ndlp->nlp_rpi); 201 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
185 202
186 psli = &phba->sli; 203 lpfc_fabric_abort_nport(ndlp);
187 pring = &psli->ring[LPFC_ELS_RING];
188 204
189 /* First check the txq */ 205 /* First check the txq */
190 spin_lock_irq(phba->host->host_lock); 206 spin_lock_irq(&phba->hbalock);
191 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 207 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
192 /* Check to see if iocb matches the nport we are looking 208 /* Check to see if iocb matches the nport we are looking for */
193 for */
194 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { 209 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
195 /* It matches, so deque and call compl with an 210 /* It matches, so deque and call compl with anp error */
196 error */
197 list_move_tail(&iocb->list, &completions); 211 list_move_tail(&iocb->list, &completions);
198 pring->txq_cnt--; 212 pring->txq_cnt--;
199 } 213 }
@@ -201,37 +215,39 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
201 215
202 /* Next check the txcmplq */ 216 /* Next check the txcmplq */
203 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 217 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
204 /* Check to see if iocb matches the nport we are looking 218 /* Check to see if iocb matches the nport we are looking for */
205 for */ 219 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
206 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
207 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 220 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
221 }
208 } 222 }
209 spin_unlock_irq(phba->host->host_lock); 223 spin_unlock_irq(&phba->hbalock);
210 224
211 while (!list_empty(&completions)) { 225 while (!list_empty(&completions)) {
212 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 226 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
213 cmd = &iocb->iocb; 227 cmd = &iocb->iocb;
214 list_del(&iocb->list); 228 list_del_init(&iocb->list);
215 229
216 if (iocb->iocb_cmpl) { 230 if (!iocb->iocb_cmpl)
231 lpfc_sli_release_iocbq(phba, iocb);
232 else {
217 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 233 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
218 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 234 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
219 (iocb->iocb_cmpl) (phba, iocb, iocb); 235 (iocb->iocb_cmpl) (phba, iocb, iocb);
220 } else 236 }
221 lpfc_sli_release_iocbq(phba, iocb);
222 } 237 }
223 238
224 /* If we are delaying issuing an ELS command, cancel it */ 239 /* If we are delaying issuing an ELS command, cancel it */
225 if (ndlp->nlp_flag & NLP_DELAY_TMO) 240 if (ndlp->nlp_flag & NLP_DELAY_TMO)
226 lpfc_cancel_retry_delay_tmo(phba, ndlp); 241 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
227 return 0; 242 return 0;
228} 243}
229 244
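lpfc_els_abort() above uses a two-phase abort: matching txq entries are moved to a private completions list while hbalock is held, then the lock is dropped and each entry is completed with LOCAL_REJECT / SLI_ABORTED (or simply released when it has no handler). A small single-threaded sketch of that collect-then-complete pattern with a toy iocb list; none of these types are the SLI structures:

#include <stdio.h>
#include <stdlib.h>

struct iocb {
	int did;			/* destination id this request targets */
	void (*cmpl)(struct iocb *);	/* completion handler, may be NULL */
	struct iocb *next;
};

static void els_cmpl(struct iocb *io)
{
	/* the driver's handler would see LOCAL_REJECT / SLI_ABORTED here */
	printf("aborted iocb for did 0x%x\n", io->did);
}

static void abort_nport(struct iocb **txq, int did)
{
	struct iocb *completions = NULL, **pp = txq, *io;

	/* phase 1: under the hba lock in the driver, move matches onto a
	 * private list so nothing else can touch them */
	while ((io = *pp) != NULL) {
		if (io->did == did) {
			*pp = io->next;
			io->next = completions;
			completions = io;
		} else {
			pp = &io->next;
		}
	}

	/* phase 2: lock dropped; call each handler, then release the entry */
	while ((io = completions) != NULL) {
		completions = io->next;
		if (io->cmpl)
			io->cmpl(io);
		free(io);
	}
}

int main(void)
{
	struct iocb *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));
	struct iocb *txq;

	if (!a || !b)
		return 1;
	a->did = 0x10300; a->cmpl = els_cmpl; a->next = b;
	b->did = 0x10400; b->cmpl = els_cmpl; b->next = NULL;
	txq = a;
	abort_nport(&txq, 0x10300);	/* only a's handler should run */
	free(b);
	return 0;
}

The point of the split is that the completion handlers run without the lock held, so they are free to take it themselves.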
230static int 245static int
231lpfc_rcv_plogi(struct lpfc_hba * phba, 246lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
232 struct lpfc_nodelist * ndlp, 247 struct lpfc_iocbq *cmdiocb)
233 struct lpfc_iocbq *cmdiocb)
234{ 248{
249 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
250 struct lpfc_hba *phba = vport->phba;
235 struct lpfc_dmabuf *pcmd; 251 struct lpfc_dmabuf *pcmd;
236 uint32_t *lp; 252 uint32_t *lp;
237 IOCB_t *icmd; 253 IOCB_t *icmd;
@@ -241,14 +257,14 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
241 int rc; 257 int rc;
242 258
243 memset(&stat, 0, sizeof (struct ls_rjt)); 259 memset(&stat, 0, sizeof (struct ls_rjt));
244 if (phba->hba_state <= LPFC_FLOGI) { 260 if (vport->port_state <= LPFC_FLOGI) {
245 /* Before responding to PLOGI, check for pt2pt mode. 261 /* Before responding to PLOGI, check for pt2pt mode.
246 * If we are pt2pt, with an outstanding FLOGI, abort 262 * If we are pt2pt, with an outstanding FLOGI, abort
247 * the FLOGI and resend it first. 263 * the FLOGI and resend it first.
248 */ 264 */
249 if (phba->fc_flag & FC_PT2PT) { 265 if (vport->fc_flag & FC_PT2PT) {
250 lpfc_els_abort_flogi(phba); 266 lpfc_els_abort_flogi(phba);
251 if (!(phba->fc_flag & FC_PT2PT_PLOGI)) { 267 if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
252 /* If the other side is supposed to initiate 268 /* If the other side is supposed to initiate
253 * the PLOGI anyway, just ACC it now and 269 * the PLOGI anyway, just ACC it now and
254 * move on with discovery. 270 * move on with discovery.
@@ -257,45 +273,42 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
257 phba->fc_ratov = FF_DEF_RATOV; 273 phba->fc_ratov = FF_DEF_RATOV;
258 /* Start discovery - this should just do 274 /* Start discovery - this should just do
259 CLEAR_LA */ 275 CLEAR_LA */
260 lpfc_disc_start(phba); 276 lpfc_disc_start(vport);
261 } else { 277 } else
262 lpfc_initial_flogi(phba); 278 lpfc_initial_flogi(vport);
263 }
264 } else { 279 } else {
265 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; 280 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
266 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 281 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
267 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, 282 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
268 ndlp); 283 ndlp, NULL);
269 return 0; 284 return 0;
270 } 285 }
271 } 286 }
272 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 287 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
273 lp = (uint32_t *) pcmd->virt; 288 lp = (uint32_t *) pcmd->virt;
274 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 289 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
275 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) { 290 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
276 /* Reject this request because invalid parameters */ 291 /* Reject this request because invalid parameters */
277 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 292 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
278 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 293 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
279 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 294 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
295 NULL);
280 return 0; 296 return 0;
281 } 297 }
282 icmd = &cmdiocb->iocb; 298 icmd = &cmdiocb->iocb;
283 299
284 /* PLOGI chkparm OK */ 300 /* PLOGI chkparm OK */
285 lpfc_printf_log(phba, 301 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
286 KERN_INFO, 302 "%d (%d):0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
287 LOG_ELS, 303 phba->brd_no, vport->vpi,
288 "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
289 phba->brd_no,
290 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, 304 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
291 ndlp->nlp_rpi); 305 ndlp->nlp_rpi);
292 306
293 if ((phba->cfg_fcp_class == 2) && 307 if (phba->cfg_fcp_class == 2 && sp->cls2.classValid)
294 (sp->cls2.classValid)) {
295 ndlp->nlp_fcp_info |= CLASS2; 308 ndlp->nlp_fcp_info |= CLASS2;
296 } else { 309 else
297 ndlp->nlp_fcp_info |= CLASS3; 310 ndlp->nlp_fcp_info |= CLASS3;
298 } 311
299 ndlp->nlp_class_sup = 0; 312 ndlp->nlp_class_sup = 0;
300 if (sp->cls1.classValid) 313 if (sp->cls1.classValid)
301 ndlp->nlp_class_sup |= FC_COS_CLASS1; 314 ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -317,35 +330,37 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
317 case NLP_STE_PRLI_ISSUE: 330 case NLP_STE_PRLI_ISSUE:
318 case NLP_STE_UNMAPPED_NODE: 331 case NLP_STE_UNMAPPED_NODE:
319 case NLP_STE_MAPPED_NODE: 332 case NLP_STE_MAPPED_NODE:
320 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0); 333 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
321 return 1; 334 return 1;
322 } 335 }
323 336
324 if ((phba->fc_flag & FC_PT2PT) 337 if ((vport->fc_flag & FC_PT2PT) &&
325 && !(phba->fc_flag & FC_PT2PT_PLOGI)) { 338 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
326 /* rcv'ed PLOGI decides what our NPortId will be */ 339 /* rcv'ed PLOGI decides what our NPortId will be */
327 phba->fc_myDID = icmd->un.rcvels.parmRo; 340 vport->fc_myDID = icmd->un.rcvels.parmRo;
328 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 341 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
329 if (mbox == NULL) 342 if (mbox == NULL)
330 goto out; 343 goto out;
331 lpfc_config_link(phba, mbox); 344 lpfc_config_link(phba, mbox);
332 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 345 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
346 mbox->vport = vport;
333 rc = lpfc_sli_issue_mbox 347 rc = lpfc_sli_issue_mbox
334 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); 348 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
335 if (rc == MBX_NOT_FINISHED) { 349 if (rc == MBX_NOT_FINISHED) {
336 mempool_free( mbox, phba->mbox_mem_pool); 350 mempool_free(mbox, phba->mbox_mem_pool);
337 goto out; 351 goto out;
338 } 352 }
339 353
340 lpfc_can_disctmo(phba); 354 lpfc_can_disctmo(vport);
341 } 355 }
342 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 356 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
343 if (mbox == NULL) 357 if (!mbox)
344 goto out; 358 goto out;
345 359
346 if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID, 360 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
347 (uint8_t *) sp, mbox, 0)) { 361 (uint8_t *) sp, mbox, 0);
348 mempool_free( mbox, phba->mbox_mem_pool); 362 if (rc) {
363 mempool_free(mbox, phba->mbox_mem_pool);
349 goto out; 364 goto out;
350 } 365 }
351 366
@@ -357,7 +372,10 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
357 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox 372 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
358 * command issued in lpfc_cmpl_els_acc(). 373 * command issued in lpfc_cmpl_els_acc().
359 */ 374 */
375 mbox->vport = vport;
376 spin_lock_irq(shost->host_lock);
360 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); 377 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
378 spin_unlock_irq(shost->host_lock);
361 379
362 /* 380 /*
363 * If there is an outstanding PLOGI issued, abort it before 381 * If there is an outstanding PLOGI issued, abort it before
@@ -373,24 +391,41 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
373 lpfc_els_abort(phba, ndlp); 391 lpfc_els_abort(phba, ndlp);
374 } 392 }
375 393
376 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); 394 if ((vport->port_type == LPFC_NPIV_PORT &&
395 phba->cfg_vport_restrict_login)) {
396
 397 /* In order to preserve RPIs, we want to clean up
398 * the default RPI the firmware created to rcv
399 * this ELS request. The only way to do this is
400 * to register, then unregister the RPI.
401 */
402 spin_lock_irq(shost->host_lock);
403 ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
404 spin_unlock_irq(shost->host_lock);
405 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
406 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
407 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
408 ndlp, mbox);
409 return 1;
410 }
411 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
377 return 1; 412 return 1;
378 413
379out: 414out:
380 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 415 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
381 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; 416 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
382 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 417 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
383 return 0; 418 return 0;
384} 419}
385 420
386static int 421static int
387lpfc_rcv_padisc(struct lpfc_hba * phba, 422lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
388 struct lpfc_nodelist * ndlp,
389 struct lpfc_iocbq *cmdiocb) 423 struct lpfc_iocbq *cmdiocb)
390{ 424{
425 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
391 struct lpfc_dmabuf *pcmd; 426 struct lpfc_dmabuf *pcmd;
392 struct serv_parm *sp; 427 struct serv_parm *sp;
393 struct lpfc_name *pnn, *ppn; 428 struct lpfc_name *pnn, *ppn;
394 struct ls_rjt stat; 429 struct ls_rjt stat;
395 ADISC *ap; 430 ADISC *ap;
396 IOCB_t *icmd; 431 IOCB_t *icmd;
@@ -412,13 +447,12 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
412 } 447 }
413 448
414 icmd = &cmdiocb->iocb; 449 icmd = &cmdiocb->iocb;
415 if ((icmd->ulpStatus == 0) && 450 if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
416 (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
417 if (cmd == ELS_CMD_ADISC) { 451 if (cmd == ELS_CMD_ADISC) {
418 lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp); 452 lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
419 } else { 453 } else {
420 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, 454 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
421 NULL, 0); 455 NULL, 0);
422 } 456 }
423 return 1; 457 return 1;
424 } 458 }
@@ -427,55 +461,57 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
427 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 461 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
428 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 462 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
429 stat.un.b.vendorUnique = 0; 463 stat.un.b.vendorUnique = 0;
430 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 464 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
431 465
432 /* 1 sec timeout */ 466 /* 1 sec timeout */
433 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 467 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
434 468
435 spin_lock_irq(phba->host->host_lock); 469 spin_lock_irq(shost->host_lock);
436 ndlp->nlp_flag |= NLP_DELAY_TMO; 470 ndlp->nlp_flag |= NLP_DELAY_TMO;
437 spin_unlock_irq(phba->host->host_lock); 471 spin_unlock_irq(shost->host_lock);
438 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 472 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
439 ndlp->nlp_prev_state = ndlp->nlp_state; 473 ndlp->nlp_prev_state = ndlp->nlp_state;
440 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 474 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
441 return 0; 475 return 0;
442} 476}
443 477
444static int 478static int
445lpfc_rcv_logo(struct lpfc_hba * phba, 479lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
446 struct lpfc_nodelist * ndlp, 480 struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
447 struct lpfc_iocbq *cmdiocb,
448 uint32_t els_cmd)
449{ 481{
450 /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */ 482 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
483
484 /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
451 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary 485 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
452 * PLOGIs during LOGO storms from a device. 486 * PLOGIs during LOGO storms from a device.
453 */ 487 */
488 spin_lock_irq(shost->host_lock);
454 ndlp->nlp_flag |= NLP_LOGO_ACC; 489 ndlp->nlp_flag |= NLP_LOGO_ACC;
490 spin_unlock_irq(shost->host_lock);
455 if (els_cmd == ELS_CMD_PRLO) 491 if (els_cmd == ELS_CMD_PRLO)
456 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); 492 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
457 else 493 else
458 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 494 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
459 495
460 if (!(ndlp->nlp_type & NLP_FABRIC) || 496 if (!(ndlp->nlp_type & NLP_FABRIC) ||
461 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 497 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
462 /* Only try to re-login if this is NOT a Fabric Node */ 498 /* Only try to re-login if this is NOT a Fabric Node */
463 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 499 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
464 spin_lock_irq(phba->host->host_lock); 500 spin_lock_irq(shost->host_lock);
465 ndlp->nlp_flag |= NLP_DELAY_TMO; 501 ndlp->nlp_flag |= NLP_DELAY_TMO;
466 spin_unlock_irq(phba->host->host_lock); 502 spin_unlock_irq(shost->host_lock);
467 503
468 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 504 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
469 ndlp->nlp_prev_state = ndlp->nlp_state; 505 ndlp->nlp_prev_state = ndlp->nlp_state;
470 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 506 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
471 } else { 507 } else {
472 ndlp->nlp_prev_state = ndlp->nlp_state; 508 ndlp->nlp_prev_state = ndlp->nlp_state;
473 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 509 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
474 } 510 }
475 511
476 spin_lock_irq(phba->host->host_lock); 512 spin_lock_irq(shost->host_lock);
477 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 513 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
478 spin_unlock_irq(phba->host->host_lock); 514 spin_unlock_irq(shost->host_lock);
479 /* The driver has to wait until the ACC completes before it continues 515 /* The driver has to wait until the ACC completes before it continues
480 * processing the LOGO. The action will resume in 516 * processing the LOGO. The action will resume in
481 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an 517 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
@@ -485,9 +521,8 @@ lpfc_rcv_logo(struct lpfc_hba * phba,
485} 521}
486 522
487static void 523static void
488lpfc_rcv_prli(struct lpfc_hba * phba, 524lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
489 struct lpfc_nodelist * ndlp, 525 struct lpfc_iocbq *cmdiocb)
490 struct lpfc_iocbq *cmdiocb)
491{ 526{
492 struct lpfc_dmabuf *pcmd; 527 struct lpfc_dmabuf *pcmd;
493 uint32_t *lp; 528 uint32_t *lp;
@@ -501,8 +536,7 @@ lpfc_rcv_prli(struct lpfc_hba * phba,
501 536
502 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 537 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
503 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 538 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
504 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && 539 if (npr->prliType == PRLI_FCP_TYPE) {
505 (npr->prliType == PRLI_FCP_TYPE)) {
506 if (npr->initiatorFunc) 540 if (npr->initiatorFunc)
507 ndlp->nlp_type |= NLP_FCP_INITIATOR; 541 ndlp->nlp_type |= NLP_FCP_INITIATOR;
508 if (npr->targetFunc) 542 if (npr->targetFunc)
@@ -517,36 +551,42 @@ lpfc_rcv_prli(struct lpfc_hba * phba,
517 roles |= FC_RPORT_ROLE_FCP_INITIATOR; 551 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
518 if (ndlp->nlp_type & NLP_FCP_TARGET) 552 if (ndlp->nlp_type & NLP_FCP_TARGET)
519 roles |= FC_RPORT_ROLE_FCP_TARGET; 553 roles |= FC_RPORT_ROLE_FCP_TARGET;
554
555 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
556 "rport rolechg: role:x%x did:x%x flg:x%x",
557 roles, ndlp->nlp_DID, ndlp->nlp_flag);
558
520 fc_remote_port_rolechg(rport, roles); 559 fc_remote_port_rolechg(rport, roles);
521 } 560 }
522} 561}
523 562
524static uint32_t 563static uint32_t
525lpfc_disc_set_adisc(struct lpfc_hba * phba, 564lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
526 struct lpfc_nodelist * ndlp)
527{ 565{
566 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
567 struct lpfc_hba *phba = vport->phba;
568
528 /* Check config parameter use-adisc or FCP-2 */ 569 /* Check config parameter use-adisc or FCP-2 */
529 if ((phba->cfg_use_adisc == 0) && 570 if ((phba->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
530 !(phba->fc_flag & FC_RSCN_MODE)) { 571 ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
531 if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) 572 spin_lock_irq(shost->host_lock);
532 return 0; 573 ndlp->nlp_flag |= NLP_NPR_ADISC;
574 spin_unlock_irq(shost->host_lock);
575 return 1;
533 } 576 }
534 spin_lock_irq(phba->host->host_lock); 577 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
535 ndlp->nlp_flag |= NLP_NPR_ADISC; 578 lpfc_unreg_rpi(vport, ndlp);
536 spin_unlock_irq(phba->host->host_lock); 579 return 0;
537 return 1;
538} 580}
539 581
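The rewritten lpfc_disc_set_adisc() inverts the old negative test: ADISC recovery is chosen only when use-adisc is configured and an RSCN is in progress, or when the node is an FCP-2 device; otherwise the flag is cleared and the RPI is unregistered so recovery falls back to PLOGI. A condensed sketch of just that decision, with flag values picked arbitrarily for the example:

#include <stdio.h>

#define FC_RSCN_MODE     0x1
#define NLP_FCP_2_DEVICE 0x1

/* Return nonzero if the node should be recovered with ADISC, zero for PLOGI. */
static int want_adisc(int cfg_use_adisc, unsigned int fc_flag, unsigned int fcp_info)
{
	return (cfg_use_adisc && (fc_flag & FC_RSCN_MODE)) ||
	       (fcp_info & NLP_FCP_2_DEVICE);
}

int main(void)
{
	printf("%d %d %d\n",
	       want_adisc(1, FC_RSCN_MODE, 0),	/* 1: RSCN recovery with use-adisc set */
	       want_adisc(0, 0, NLP_FCP_2_DEVICE),	/* 1: FCP-2 device always ADISCs */
	       want_adisc(1, 0, 0));			/* 0: fall back to PLOGI */
	return 0;
}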
540static uint32_t 582static uint32_t
541lpfc_disc_illegal(struct lpfc_hba * phba, 583lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
542 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 584 void *arg, uint32_t evt)
543{ 585{
544 lpfc_printf_log(phba, 586 lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
545 KERN_ERR, 587 "%d (%d):0253 Illegal State Transition: node x%x "
546 LOG_DISCOVERY, 588 "event x%x, state x%x Data: x%x x%x\n",
547 "%d:0253 Illegal State Transition: node x%x event x%x, " 589 vport->phba->brd_no, vport->vpi,
548 "state x%x Data: x%x x%x\n",
549 phba->brd_no,
550 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 590 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
551 ndlp->nlp_flag); 591 ndlp->nlp_flag);
552 return ndlp->nlp_state; 592 return ndlp->nlp_state;
@@ -555,151 +595,162 @@ lpfc_disc_illegal(struct lpfc_hba * phba,
555/* Start of Discovery State Machine routines */ 595/* Start of Discovery State Machine routines */
556 596
557static uint32_t 597static uint32_t
558lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba, 598lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
559 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 599 void *arg, uint32_t evt)
560{ 600{
561 struct lpfc_iocbq *cmdiocb; 601 struct lpfc_iocbq *cmdiocb;
562 602
563 cmdiocb = (struct lpfc_iocbq *) arg; 603 cmdiocb = (struct lpfc_iocbq *) arg;
564 604
565 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) { 605 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
566 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 606 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
567 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 607 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
568 return ndlp->nlp_state; 608 return ndlp->nlp_state;
569 } 609 }
570 lpfc_drop_node(phba, ndlp); 610 lpfc_drop_node(vport, ndlp);
571 return NLP_STE_FREED_NODE; 611 return NLP_STE_FREED_NODE;
572} 612}
573 613
574static uint32_t 614static uint32_t
575lpfc_rcv_els_unused_node(struct lpfc_hba * phba, 615lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
576 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 616 void *arg, uint32_t evt)
577{ 617{
578 lpfc_issue_els_logo(phba, ndlp, 0); 618 lpfc_issue_els_logo(vport, ndlp, 0);
579 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 619 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
580 return ndlp->nlp_state; 620 return ndlp->nlp_state;
581} 621}
582 622
583static uint32_t 623static uint32_t
584lpfc_rcv_logo_unused_node(struct lpfc_hba * phba, 624lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
585 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 625 void *arg, uint32_t evt)
586{ 626{
587 struct lpfc_iocbq *cmdiocb; 627 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
588 628 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
589 cmdiocb = (struct lpfc_iocbq *) arg;
590 629
591 spin_lock_irq(phba->host->host_lock); 630 spin_lock_irq(shost->host_lock);
592 ndlp->nlp_flag |= NLP_LOGO_ACC; 631 ndlp->nlp_flag |= NLP_LOGO_ACC;
593 spin_unlock_irq(phba->host->host_lock); 632 spin_unlock_irq(shost->host_lock);
594 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 633 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
595 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 634 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
596 635
597 return ndlp->nlp_state; 636 return ndlp->nlp_state;
598} 637}
599 638
600static uint32_t 639static uint32_t
601lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba, 640lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
602 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 641 void *arg, uint32_t evt)
603{ 642{
604 lpfc_drop_node(phba, ndlp); 643 lpfc_drop_node(vport, ndlp);
605 return NLP_STE_FREED_NODE; 644 return NLP_STE_FREED_NODE;
606} 645}
607 646
608static uint32_t 647static uint32_t
609lpfc_device_rm_unused_node(struct lpfc_hba * phba, 648lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
610 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 649 void *arg, uint32_t evt)
611{ 650{
612 lpfc_drop_node(phba, ndlp); 651 lpfc_drop_node(vport, ndlp);
613 return NLP_STE_FREED_NODE; 652 return NLP_STE_FREED_NODE;
614} 653}
615 654
616static uint32_t 655static uint32_t
617lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 656lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
618 void *arg, uint32_t evt) 657 void *arg, uint32_t evt)
619{ 658{
659 struct lpfc_hba *phba = vport->phba;
620 struct lpfc_iocbq *cmdiocb = arg; 660 struct lpfc_iocbq *cmdiocb = arg;
621 struct lpfc_dmabuf *pcmd; 661 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
622 struct serv_parm *sp; 662 uint32_t *lp = (uint32_t *) pcmd->virt;
623 uint32_t *lp; 663 struct serv_parm *sp = (struct serv_parm *) (lp + 1);
624 struct ls_rjt stat; 664 struct ls_rjt stat;
625 int port_cmp; 665 int port_cmp;
626 666
627 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
628 lp = (uint32_t *) pcmd->virt;
629 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
630
631 memset(&stat, 0, sizeof (struct ls_rjt)); 667 memset(&stat, 0, sizeof (struct ls_rjt));
632 668
633 /* For a PLOGI, we only accept if our portname is less 669 /* For a PLOGI, we only accept if our portname is less
634 * than the remote portname. 670 * than the remote portname.
635 */ 671 */
636 phba->fc_stat.elsLogiCol++; 672 phba->fc_stat.elsLogiCol++;
637 port_cmp = memcmp(&phba->fc_portname, &sp->portName, 673 port_cmp = memcmp(&vport->fc_portname, &sp->portName,
638 sizeof (struct lpfc_name)); 674 sizeof(struct lpfc_name));
639 675
640 if (port_cmp >= 0) { 676 if (port_cmp >= 0) {
641 /* Reject this request because the remote node will accept 677 /* Reject this request because the remote node will accept
642 ours */ 678 ours */
643 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 679 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
644 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 680 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
645 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 681 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
682 NULL);
646 } else { 683 } else {
647 lpfc_rcv_plogi(phba, ndlp, cmdiocb); 684 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
648 } /* if our portname was less */ 685 } /* If our portname was less */
649 686
650 return ndlp->nlp_state; 687 return ndlp->nlp_state;
651} 688}
652 689
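lpfc_rcv_plogi_plogi_issue() resolves a PLOGI collision purely by WWPN order: only the port whose portname compares lower accepts the peer's PLOGI; the other side answers LS_RJT (command in progress) and lets its own outstanding PLOGI stand. A tiny sketch of that tie-break with made-up 8-byte names:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint8_t ours[8]   = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x3c, 0x5d, 0x01 };
	uint8_t theirs[8] = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x3c, 0x5d, 0x02 };

	/* Same tie-break as the driver: memcmp over the raw WWPN bytes. */
	if (memcmp(ours, theirs, sizeof(ours)) >= 0)
		printf("our name is higher: reject their PLOGI, ours will be accepted\n");
	else
		printf("our name is lower: accept their PLOGI and drop ours\n");
	return 0;
}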
653static uint32_t 690static uint32_t
654lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba, 691lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
655 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 692 void *arg, uint32_t evt)
656{ 693{
657 struct lpfc_iocbq *cmdiocb; 694 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
695 struct ls_rjt stat;
658 696
659 cmdiocb = (struct lpfc_iocbq *) arg; 697 memset(&stat, 0, sizeof (struct ls_rjt));
698 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
699 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
700 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
701 return ndlp->nlp_state;
702}
660 703
661 /* software abort outstanding PLOGI */ 704static uint32_t
662 lpfc_els_abort(phba, ndlp); 705lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
706 void *arg, uint32_t evt)
707{
708 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
709
710 /* software abort outstanding PLOGI */
711 lpfc_els_abort(vport->phba, ndlp);
663 712
664 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 713 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
665 return ndlp->nlp_state; 714 return ndlp->nlp_state;
666} 715}
667 716
668static uint32_t 717static uint32_t
669lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba, 718lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
670 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 719 void *arg, uint32_t evt)
671{ 720{
672 struct lpfc_iocbq *cmdiocb; 721 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
673 722 struct lpfc_hba *phba = vport->phba;
674 cmdiocb = (struct lpfc_iocbq *) arg; 723 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
675 724
676 /* software abort outstanding PLOGI */ 725 /* software abort outstanding PLOGI */
677 lpfc_els_abort(phba, ndlp); 726 lpfc_els_abort(phba, ndlp);
678 727
679 if (evt == NLP_EVT_RCV_LOGO) { 728 if (evt == NLP_EVT_RCV_LOGO) {
680 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 729 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
681 } else { 730 } else {
682 lpfc_issue_els_logo(phba, ndlp, 0); 731 lpfc_issue_els_logo(vport, ndlp, 0);
683 } 732 }
684 733
685 /* Put ndlp in npr list set plogi timer for 1 sec */ 734 /* Put ndlp in npr state set plogi timer for 1 sec */
686 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 735 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
687 spin_lock_irq(phba->host->host_lock); 736 spin_lock_irq(shost->host_lock);
688 ndlp->nlp_flag |= NLP_DELAY_TMO; 737 ndlp->nlp_flag |= NLP_DELAY_TMO;
689 spin_unlock_irq(phba->host->host_lock); 738 spin_unlock_irq(shost->host_lock);
690 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 739 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
691 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 740 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
692 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 741 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
693 742
694 return ndlp->nlp_state; 743 return ndlp->nlp_state;
695} 744}
696 745
697static uint32_t 746static uint32_t
698lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, 747lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
699 struct lpfc_nodelist * ndlp, void *arg, 748 struct lpfc_nodelist *ndlp,
749 void *arg,
700 uint32_t evt) 750 uint32_t evt)
701{ 751{
702 struct lpfc_iocbq *cmdiocb, *rspiocb; 752 struct lpfc_hba *phba = vport->phba;
753 struct lpfc_iocbq *cmdiocb, *rspiocb;
703 struct lpfc_dmabuf *pcmd, *prsp, *mp; 754 struct lpfc_dmabuf *pcmd, *prsp, *mp;
704 uint32_t *lp; 755 uint32_t *lp;
705 IOCB_t *irsp; 756 IOCB_t *irsp;
@@ -721,31 +772,26 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
721 772
722 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 773 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
723 774
724 prsp = list_get_first(&pcmd->list, 775 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
725 struct lpfc_dmabuf,
726 list);
727 lp = (uint32_t *) prsp->virt;
728 776
777 lp = (uint32_t *) prsp->virt;
729 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 778 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
730 if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3)) 779 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
731 goto out; 780 goto out;
732 781
733 /* PLOGI chkparm OK */ 782 /* PLOGI chkparm OK */
734 lpfc_printf_log(phba, 783 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
735 KERN_INFO, 784 "%d (%d):0121 PLOGI chkparm OK "
736 LOG_ELS,
737 "%d:0121 PLOGI chkparm OK "
738 "Data: x%x x%x x%x x%x\n", 785 "Data: x%x x%x x%x x%x\n",
739 phba->brd_no, 786 phba->brd_no, vport->vpi,
740 ndlp->nlp_DID, ndlp->nlp_state, 787 ndlp->nlp_DID, ndlp->nlp_state,
741 ndlp->nlp_flag, ndlp->nlp_rpi); 788 ndlp->nlp_flag, ndlp->nlp_rpi);
742 789
743 if ((phba->cfg_fcp_class == 2) && 790 if (phba->cfg_fcp_class == 2 && (sp->cls2.classValid))
744 (sp->cls2.classValid)) {
745 ndlp->nlp_fcp_info |= CLASS2; 791 ndlp->nlp_fcp_info |= CLASS2;
746 } else { 792 else
747 ndlp->nlp_fcp_info |= CLASS3; 793 ndlp->nlp_fcp_info |= CLASS3;
748 } 794
749 ndlp->nlp_class_sup = 0; 795 ndlp->nlp_class_sup = 0;
750 if (sp->cls1.classValid) 796 if (sp->cls1.classValid)
751 ndlp->nlp_class_sup |= FC_COS_CLASS1; 797 ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -756,16 +802,23 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
756 if (sp->cls4.classValid) 802 if (sp->cls4.classValid)
757 ndlp->nlp_class_sup |= FC_COS_CLASS4; 803 ndlp->nlp_class_sup |= FC_COS_CLASS4;
758 ndlp->nlp_maxframe = 804 ndlp->nlp_maxframe =
759 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | 805 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
760 sp->cmn.bbRcvSizeLsb;
761 806
762 if (!(mbox = mempool_alloc(phba->mbox_mem_pool, 807 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
763 GFP_KERNEL))) 808 if (!mbox) {
809 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
810 "%d (%d):0133 PLOGI: no memory for reg_login "
811 "Data: x%x x%x x%x x%x\n",
812 phba->brd_no, vport->vpi,
813 ndlp->nlp_DID, ndlp->nlp_state,
814 ndlp->nlp_flag, ndlp->nlp_rpi);
764 goto out; 815 goto out;
816 }
765 817
766 lpfc_unreg_rpi(phba, ndlp); 818 lpfc_unreg_rpi(vport, ndlp);
767 if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp, 819
768 mbox, 0) == 0) { 820 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
821 (uint8_t *) sp, mbox, 0) == 0) {
769 switch (ndlp->nlp_DID) { 822 switch (ndlp->nlp_DID) {
770 case NameServer_DID: 823 case NameServer_DID:
771 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; 824 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
@@ -777,68 +830,104 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
777 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 830 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
778 } 831 }
779 mbox->context2 = lpfc_nlp_get(ndlp); 832 mbox->context2 = lpfc_nlp_get(ndlp);
833 mbox->vport = vport;
780 if (lpfc_sli_issue_mbox(phba, mbox, 834 if (lpfc_sli_issue_mbox(phba, mbox,
781 (MBX_NOWAIT | MBX_STOP_IOCB)) 835 (MBX_NOWAIT | MBX_STOP_IOCB))
782 != MBX_NOT_FINISHED) { 836 != MBX_NOT_FINISHED) {
783 lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE); 837 lpfc_nlp_set_state(vport, ndlp,
838 NLP_STE_REG_LOGIN_ISSUE);
784 return ndlp->nlp_state; 839 return ndlp->nlp_state;
785 } 840 }
786 lpfc_nlp_put(ndlp); 841 lpfc_nlp_put(ndlp);
787 mp = (struct lpfc_dmabuf *)mbox->context1; 842 mp = (struct lpfc_dmabuf *) mbox->context1;
788 lpfc_mbuf_free(phba, mp->virt, mp->phys); 843 lpfc_mbuf_free(phba, mp->virt, mp->phys);
789 kfree(mp); 844 kfree(mp);
790 mempool_free(mbox, phba->mbox_mem_pool); 845 mempool_free(mbox, phba->mbox_mem_pool);
846
847 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
848 "%d (%d):0134 PLOGI: cannot issue reg_login "
849 "Data: x%x x%x x%x x%x\n",
850 phba->brd_no, vport->vpi,
851 ndlp->nlp_DID, ndlp->nlp_state,
852 ndlp->nlp_flag, ndlp->nlp_rpi);
791 } else { 853 } else {
792 mempool_free(mbox, phba->mbox_mem_pool); 854 mempool_free(mbox, phba->mbox_mem_pool);
855
856 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
857 "%d (%d):0135 PLOGI: cannot format reg_login "
858 "Data: x%x x%x x%x x%x\n",
859 phba->brd_no, vport->vpi,
860 ndlp->nlp_DID, ndlp->nlp_state,
861 ndlp->nlp_flag, ndlp->nlp_rpi);
793 } 862 }
794 863
795 864
796 out: 865out:
866 if (ndlp->nlp_DID == NameServer_DID) {
867 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
868 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
869 "%d (%d):0261 Cannot Register NameServer login\n",
870 phba->brd_no, vport->vpi);
871 }
872
797 /* Free this node since the driver cannot login or has the wrong 873 /* Free this node since the driver cannot login or has the wrong
798 sparm */ 874 sparm */
799 lpfc_drop_node(phba, ndlp); 875 lpfc_drop_node(vport, ndlp);
800 return NLP_STE_FREED_NODE; 876 return NLP_STE_FREED_NODE;
801} 877}
802 878
803static uint32_t 879static uint32_t
804lpfc_device_rm_plogi_issue(struct lpfc_hba * phba, 880lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
805 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 881 void *arg, uint32_t evt)
806{ 882{
807 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { 883 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
884
885 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
886 spin_lock_irq(shost->host_lock);
808 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 887 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
888 spin_unlock_irq(shost->host_lock);
809 return ndlp->nlp_state; 889 return ndlp->nlp_state;
810 } 890 } else {
811 else {
812 /* software abort outstanding PLOGI */ 891 /* software abort outstanding PLOGI */
813 lpfc_els_abort(phba, ndlp); 892 lpfc_els_abort(vport->phba, ndlp);
814 893
815 lpfc_drop_node(phba, ndlp); 894 lpfc_drop_node(vport, ndlp);
816 return NLP_STE_FREED_NODE; 895 return NLP_STE_FREED_NODE;
817 } 896 }
818} 897}
819 898
820static uint32_t 899static uint32_t
821lpfc_device_recov_plogi_issue(struct lpfc_hba * phba, 900lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
822 struct lpfc_nodelist * ndlp, void *arg, 901 struct lpfc_nodelist *ndlp,
823 uint32_t evt) 902 void *arg,
903 uint32_t evt)
824{ 904{
905 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
906 struct lpfc_hba *phba = vport->phba;
907
908 /* Don't do anything that will mess up processing of the
909 * previous RSCN.
910 */
911 if (vport->fc_flag & FC_RSCN_DEFERRED)
912 return ndlp->nlp_state;
913
825 /* software abort outstanding PLOGI */ 914 /* software abort outstanding PLOGI */
826 lpfc_els_abort(phba, ndlp); 915 lpfc_els_abort(phba, ndlp);
827 916
828 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 917 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
829 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 918 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
830 spin_lock_irq(phba->host->host_lock); 919 spin_lock_irq(shost->host_lock);
831 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 920 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
832 spin_unlock_irq(phba->host->host_lock); 921 spin_unlock_irq(shost->host_lock);
833 922
834 return ndlp->nlp_state; 923 return ndlp->nlp_state;
835} 924}
836 925
837static uint32_t 926static uint32_t
838lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba, 927lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
839 struct lpfc_nodelist * ndlp, void *arg, 928 void *arg, uint32_t evt)
840 uint32_t evt)
841{ 929{
930 struct lpfc_hba *phba = vport->phba;
842 struct lpfc_iocbq *cmdiocb; 931 struct lpfc_iocbq *cmdiocb;
843 932
844 /* software abort outstanding ADISC */ 933 /* software abort outstanding ADISC */
@@ -846,34 +935,31 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
846 935
847 cmdiocb = (struct lpfc_iocbq *) arg; 936 cmdiocb = (struct lpfc_iocbq *) arg;
848 937
849 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) { 938 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
850 return ndlp->nlp_state; 939 return ndlp->nlp_state;
851 } 940
852 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 941 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
853 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 942 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
854 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 943 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
855 944
856 return ndlp->nlp_state; 945 return ndlp->nlp_state;
857} 946}
858 947
859static uint32_t 948static uint32_t
860lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba, 949lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
861 struct lpfc_nodelist * ndlp, void *arg, 950 void *arg, uint32_t evt)
862 uint32_t evt)
863{ 951{
864 struct lpfc_iocbq *cmdiocb; 952 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
865
866 cmdiocb = (struct lpfc_iocbq *) arg;
867 953
868 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp); 954 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
869 return ndlp->nlp_state; 955 return ndlp->nlp_state;
870} 956}
871 957
872static uint32_t 958static uint32_t
873lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba, 959lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
874 struct lpfc_nodelist * ndlp, void *arg, 960 void *arg, uint32_t evt)
875 uint32_t evt)
876{ 961{
962 struct lpfc_hba *phba = vport->phba;
877 struct lpfc_iocbq *cmdiocb; 963 struct lpfc_iocbq *cmdiocb;
878 964
879 cmdiocb = (struct lpfc_iocbq *) arg; 965 cmdiocb = (struct lpfc_iocbq *) arg;
@@ -881,42 +967,43 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
881 /* software abort outstanding ADISC */ 967 /* software abort outstanding ADISC */
882 lpfc_els_abort(phba, ndlp); 968 lpfc_els_abort(phba, ndlp);
883 969
884 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 970 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
885 return ndlp->nlp_state; 971 return ndlp->nlp_state;
886} 972}
887 973
888static uint32_t 974static uint32_t
889lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba, 975lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
890 struct lpfc_nodelist * ndlp, void *arg, 976 struct lpfc_nodelist *ndlp,
891 uint32_t evt) 977 void *arg, uint32_t evt)
892{ 978{
893 struct lpfc_iocbq *cmdiocb; 979 struct lpfc_iocbq *cmdiocb;
894 980
895 cmdiocb = (struct lpfc_iocbq *) arg; 981 cmdiocb = (struct lpfc_iocbq *) arg;
896 982
897 lpfc_rcv_padisc(phba, ndlp, cmdiocb); 983 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
898 return ndlp->nlp_state; 984 return ndlp->nlp_state;
899} 985}
900 986
901static uint32_t 987static uint32_t
902lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba, 988lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
903 struct lpfc_nodelist * ndlp, void *arg, 989 void *arg, uint32_t evt)
904 uint32_t evt)
905{ 990{
906 struct lpfc_iocbq *cmdiocb; 991 struct lpfc_iocbq *cmdiocb;
907 992
908 cmdiocb = (struct lpfc_iocbq *) arg; 993 cmdiocb = (struct lpfc_iocbq *) arg;
909 994
910 /* Treat like rcv logo */ 995 /* Treat like rcv logo */
911 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); 996 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
912 return ndlp->nlp_state; 997 return ndlp->nlp_state;
913} 998}
914 999
915static uint32_t 1000static uint32_t
916lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba, 1001lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
917 struct lpfc_nodelist * ndlp, void *arg, 1002 struct lpfc_nodelist *ndlp,
918 uint32_t evt) 1003 void *arg, uint32_t evt)
919{ 1004{
1005 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1006 struct lpfc_hba *phba = vport->phba;
920 struct lpfc_iocbq *cmdiocb, *rspiocb; 1007 struct lpfc_iocbq *cmdiocb, *rspiocb;
921 IOCB_t *irsp; 1008 IOCB_t *irsp;
922 ADISC *ap; 1009 ADISC *ap;
@@ -928,101 +1015,112 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
928 irsp = &rspiocb->iocb; 1015 irsp = &rspiocb->iocb;
929 1016
930 if ((irsp->ulpStatus) || 1017 if ((irsp->ulpStatus) ||
931 (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) { 1018 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
932 /* 1 sec timeout */ 1019 /* 1 sec timeout */
933 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 1020 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
934 spin_lock_irq(phba->host->host_lock); 1021 spin_lock_irq(shost->host_lock);
935 ndlp->nlp_flag |= NLP_DELAY_TMO; 1022 ndlp->nlp_flag |= NLP_DELAY_TMO;
936 spin_unlock_irq(phba->host->host_lock); 1023 spin_unlock_irq(shost->host_lock);
937 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1024 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
938 1025
939 memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name)); 1026 memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
940 memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name)); 1027 memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
941 1028
942 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1029 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
943 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1030 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
944 lpfc_unreg_rpi(phba, ndlp); 1031 lpfc_unreg_rpi(vport, ndlp);
945 return ndlp->nlp_state; 1032 return ndlp->nlp_state;
946 } 1033 }
947 1034
948 if (ndlp->nlp_type & NLP_FCP_TARGET) { 1035 if (ndlp->nlp_type & NLP_FCP_TARGET) {
949 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1036 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
950 lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE); 1037 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
951 } else { 1038 } else {
952 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1039 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
953 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1040 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
954 } 1041 }
955 return ndlp->nlp_state; 1042 return ndlp->nlp_state;
956} 1043}
957 1044
958static uint32_t 1045static uint32_t
959lpfc_device_rm_adisc_issue(struct lpfc_hba * phba, 1046lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
960 struct lpfc_nodelist * ndlp, void *arg, 1047 void *arg, uint32_t evt)
961 uint32_t evt)
962{ 1048{
963 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1049 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1050
1051 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1052 spin_lock_irq(shost->host_lock);
964 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1053 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1054 spin_unlock_irq(shost->host_lock);
965 return ndlp->nlp_state; 1055 return ndlp->nlp_state;
966 } 1056 } else {
967 else {
968 /* software abort outstanding ADISC */ 1057 /* software abort outstanding ADISC */
969 lpfc_els_abort(phba, ndlp); 1058 lpfc_els_abort(vport->phba, ndlp);
970 1059
971 lpfc_drop_node(phba, ndlp); 1060 lpfc_drop_node(vport, ndlp);
972 return NLP_STE_FREED_NODE; 1061 return NLP_STE_FREED_NODE;
973 } 1062 }
974} 1063}
975 1064
976static uint32_t 1065static uint32_t
977lpfc_device_recov_adisc_issue(struct lpfc_hba * phba, 1066lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
978 struct lpfc_nodelist * ndlp, void *arg, 1067 struct lpfc_nodelist *ndlp,
979 uint32_t evt) 1068 void *arg,
1069 uint32_t evt)
980{ 1070{
1071 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1072 struct lpfc_hba *phba = vport->phba;
1073
1074 /* Don't do anything that will mess up processing of the
1075 * previous RSCN.
1076 */
1077 if (vport->fc_flag & FC_RSCN_DEFERRED)
1078 return ndlp->nlp_state;
1079
981 /* software abort outstanding ADISC */ 1080 /* software abort outstanding ADISC */
982 lpfc_els_abort(phba, ndlp); 1081 lpfc_els_abort(phba, ndlp);
983 1082
984 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1083 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
985 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1084 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
986 spin_lock_irq(phba->host->host_lock); 1085 spin_lock_irq(shost->host_lock);
987 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1086 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
988 ndlp->nlp_flag |= NLP_NPR_ADISC; 1087 spin_unlock_irq(shost->host_lock);
989 spin_unlock_irq(phba->host->host_lock); 1088 lpfc_disc_set_adisc(vport, ndlp);
990
991 return ndlp->nlp_state; 1089 return ndlp->nlp_state;
992} 1090}
993 1091
994static uint32_t 1092static uint32_t
995lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba, 1093lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
996 struct lpfc_nodelist * ndlp, void *arg, 1094 struct lpfc_nodelist *ndlp,
1095 void *arg,
997 uint32_t evt) 1096 uint32_t evt)
998{ 1097{
999 struct lpfc_iocbq *cmdiocb; 1098 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1000 1099
1001 cmdiocb = (struct lpfc_iocbq *) arg; 1100 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1002
1003 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1004 return ndlp->nlp_state; 1101 return ndlp->nlp_state;
1005} 1102}
1006 1103
1007static uint32_t 1104static uint32_t
1008lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba, 1105lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1009 struct lpfc_nodelist * ndlp, void *arg, 1106 struct lpfc_nodelist *ndlp,
1107 void *arg,
1010 uint32_t evt) 1108 uint32_t evt)
1011{ 1109{
1012 struct lpfc_iocbq *cmdiocb; 1110 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1013 1111
1014 cmdiocb = (struct lpfc_iocbq *) arg; 1112 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1015
1016 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1017 return ndlp->nlp_state; 1113 return ndlp->nlp_state;
1018} 1114}
1019 1115
1020static uint32_t 1116static uint32_t
1021lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba, 1117lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1022 struct lpfc_nodelist * ndlp, void *arg, 1118 struct lpfc_nodelist *ndlp,
1119 void *arg,
1023 uint32_t evt) 1120 uint32_t evt)
1024{ 1121{
1025 struct lpfc_iocbq *cmdiocb; 1122 struct lpfc_hba *phba = vport->phba;
1123 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1026 LPFC_MBOXQ_t *mb; 1124 LPFC_MBOXQ_t *mb;
1027 LPFC_MBOXQ_t *nextmb; 1125 LPFC_MBOXQ_t *nextmb;
1028 struct lpfc_dmabuf *mp; 1126 struct lpfc_dmabuf *mp;
@@ -1033,12 +1131,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1033 if ((mb = phba->sli.mbox_active)) { 1131 if ((mb = phba->sli.mbox_active)) {
1034 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1132 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1035 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1133 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1134 lpfc_nlp_put(ndlp);
1036 mb->context2 = NULL; 1135 mb->context2 = NULL;
1037 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1136 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1038 } 1137 }
1039 } 1138 }
1040 1139
1041 spin_lock_irq(phba->host->host_lock); 1140 spin_lock_irq(&phba->hbalock);
1042 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1141 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1043 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1142 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1044 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1143 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -1047,61 +1146,61 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1047 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1048 kfree(mp); 1147 kfree(mp);
1049 } 1148 }
1149 lpfc_nlp_put(ndlp);
1050 list_del(&mb->list); 1150 list_del(&mb->list);
1051 mempool_free(mb, phba->mbox_mem_pool); 1151 mempool_free(mb, phba->mbox_mem_pool);
1052 } 1152 }
1053 } 1153 }
1054 spin_unlock_irq(phba->host->host_lock); 1154 spin_unlock_irq(&phba->hbalock);
1055 1155
1056 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 1156 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1057 return ndlp->nlp_state; 1157 return ndlp->nlp_state;
1058} 1158}
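Note: the lpfc_nlp_put() calls added in this hunk drop the node reference that a queued REG_LOGIN64 mailbox apparently holds through its context2 pointer: the command currently executing only has its completion handler swapped, while entries still waiting on the mailbox queue are unlinked and freed. A rough standalone sketch of that "cancel pending work that pins an object" pattern, using a generic reference count rather than the driver's types:

#include <stdlib.h>

struct node { int refcnt; };

struct pending_cmd {
    struct pending_cmd *next;
    struct node *ctx;              /* counted reference, like mb->context2 */
};

static void node_put(struct node *n)
{
    if (--n->refcnt == 0)
        free(n);
}

/* Detach every queued command that pins @n and drop the reference each
 * one held, mirroring the mboxq scan in the hunk above. */
static void cancel_pending_for(struct pending_cmd **head, struct node *n)
{
    struct pending_cmd **pp = head;
    struct pending_cmd *cmd;

    while ((cmd = *pp) != NULL) {
        if (cmd->ctx == n) {
            *pp = cmd->next;
            node_put(cmd->ctx);
            free(cmd);
        } else {
            pp = &cmd->next;
        }
    }
}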
1059 1159
1060static uint32_t 1160static uint32_t
1061lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba, 1161lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1062 struct lpfc_nodelist * ndlp, void *arg, 1162 struct lpfc_nodelist *ndlp,
1163 void *arg,
1063 uint32_t evt) 1164 uint32_t evt)
1064{ 1165{
1065 struct lpfc_iocbq *cmdiocb; 1166 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1066 1167
1067 cmdiocb = (struct lpfc_iocbq *) arg; 1168 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1068
1069 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1070 return ndlp->nlp_state; 1169 return ndlp->nlp_state;
1071} 1170}
1072 1171
1073static uint32_t 1172static uint32_t
1074lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba, 1173lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1075 struct lpfc_nodelist * ndlp, void *arg, 1174 struct lpfc_nodelist *ndlp,
1175 void *arg,
1076 uint32_t evt) 1176 uint32_t evt)
1077{ 1177{
1078 struct lpfc_iocbq *cmdiocb; 1178 struct lpfc_iocbq *cmdiocb;
1079 1179
1080 cmdiocb = (struct lpfc_iocbq *) arg; 1180 cmdiocb = (struct lpfc_iocbq *) arg;
1081 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); 1181 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1082 return ndlp->nlp_state; 1182 return ndlp->nlp_state;
1083} 1183}
1084 1184
1085static uint32_t 1185static uint32_t
1086lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba, 1186lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1087 struct lpfc_nodelist * ndlp, 1187 struct lpfc_nodelist *ndlp,
1088 void *arg, uint32_t evt) 1188 void *arg,
1189 uint32_t evt)
1089{ 1190{
1090 LPFC_MBOXQ_t *pmb; 1191 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1091 MAILBOX_t *mb; 1192 struct lpfc_hba *phba = vport->phba;
1092 uint32_t did; 1193 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1194 MAILBOX_t *mb = &pmb->mb;
1195 uint32_t did = mb->un.varWords[1];
1093 1196
1094 pmb = (LPFC_MBOXQ_t *) arg;
1095 mb = &pmb->mb;
1096 did = mb->un.varWords[1];
1097 if (mb->mbxStatus) { 1197 if (mb->mbxStatus) {
1098 /* RegLogin failed */ 1198 /* RegLogin failed */
1099 lpfc_printf_log(phba, 1199 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
1100 KERN_ERR, 1200 "%d (%d):0246 RegLogin failed Data: x%x x%x "
1101 LOG_DISCOVERY, 1201 "x%x\n",
1102 "%d:0246 RegLogin failed Data: x%x x%x x%x\n", 1202 phba->brd_no, vport->vpi,
1103 phba->brd_no, 1203 did, mb->mbxStatus, vport->port_state);
1104 did, mb->mbxStatus, phba->hba_state);
1105 1204
1106 /* 1205 /*
1107 * If RegLogin failed due to lack of HBA resources do not 1206 * If RegLogin failed due to lack of HBA resources do not
@@ -1109,20 +1208,20 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1109 */ 1208 */
1110 if (mb->mbxStatus == MBXERR_RPI_FULL) { 1209 if (mb->mbxStatus == MBXERR_RPI_FULL) {
1111 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 1210 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
1112 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 1211 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1113 return ndlp->nlp_state; 1212 return ndlp->nlp_state;
1114 } 1213 }
1115 1214
1116 /* Put ndlp in npr list set plogi timer for 1 sec */ 1215 /* Put ndlp in npr state set plogi timer for 1 sec */
1117 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1216 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1118 spin_lock_irq(phba->host->host_lock); 1217 spin_lock_irq(shost->host_lock);
1119 ndlp->nlp_flag |= NLP_DELAY_TMO; 1218 ndlp->nlp_flag |= NLP_DELAY_TMO;
1120 spin_unlock_irq(phba->host->host_lock); 1219 spin_unlock_irq(shost->host_lock);
1121 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1220 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1122 1221
1123 lpfc_issue_els_logo(phba, ndlp, 0); 1222 lpfc_issue_els_logo(vport, ndlp, 0);
1124 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1223 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1125 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1224 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1126 return ndlp->nlp_state; 1225 return ndlp->nlp_state;
1127 } 1226 }
1128 1227
@@ -1131,91 +1230,99 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1131 /* Only if we are not a fabric nport do we issue PRLI */ 1230 /* Only if we are not a fabric nport do we issue PRLI */
1132 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1231 if (!(ndlp->nlp_type & NLP_FABRIC)) {
1133 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1232 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1134 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE); 1233 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1135 lpfc_issue_els_prli(phba, ndlp, 0); 1234 lpfc_issue_els_prli(vport, ndlp, 0);
1136 } else { 1235 } else {
1137 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1236 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1138 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1237 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1139 } 1238 }
1140 return ndlp->nlp_state; 1239 return ndlp->nlp_state;
1141} 1240}
1142 1241
1143static uint32_t 1242static uint32_t
1144lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba, 1243lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1145 struct lpfc_nodelist * ndlp, void *arg, 1244 struct lpfc_nodelist *ndlp,
1245 void *arg,
1146 uint32_t evt) 1246 uint32_t evt)
1147{ 1247{
1148 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1248 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1249
1250 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1251 spin_lock_irq(shost->host_lock);
1149 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1252 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1253 spin_unlock_irq(shost->host_lock);
1150 return ndlp->nlp_state; 1254 return ndlp->nlp_state;
1151 } 1255 } else {
1152 else { 1256 lpfc_drop_node(vport, ndlp);
1153 lpfc_drop_node(phba, ndlp);
1154 return NLP_STE_FREED_NODE; 1257 return NLP_STE_FREED_NODE;
1155 } 1258 }
1156} 1259}
1157 1260
1158static uint32_t 1261static uint32_t
1159lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba, 1262lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1160 struct lpfc_nodelist * ndlp, void *arg, 1263 struct lpfc_nodelist *ndlp,
1161 uint32_t evt) 1264 void *arg,
1265 uint32_t evt)
1162{ 1266{
1267 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1268
1269 /* Don't do anything that will mess up processing of the
1270 * previous RSCN.
1271 */
1272 if (vport->fc_flag & FC_RSCN_DEFERRED)
1273 return ndlp->nlp_state;
1274
1163 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1275 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1164 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1276 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1165 spin_lock_irq(phba->host->host_lock); 1277 spin_lock_irq(shost->host_lock);
1166 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1278 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1167 spin_unlock_irq(phba->host->host_lock); 1279 spin_unlock_irq(shost->host_lock);
1280 lpfc_disc_set_adisc(vport, ndlp);
1168 return ndlp->nlp_state; 1281 return ndlp->nlp_state;
1169} 1282}
1170 1283
1171static uint32_t 1284static uint32_t
1172lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba, 1285lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1173 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1286 void *arg, uint32_t evt)
1174{ 1287{
1175 struct lpfc_iocbq *cmdiocb; 1288 struct lpfc_iocbq *cmdiocb;
1176 1289
1177 cmdiocb = (struct lpfc_iocbq *) arg; 1290 cmdiocb = (struct lpfc_iocbq *) arg;
1178 1291
1179 lpfc_rcv_plogi(phba, ndlp, cmdiocb); 1292 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1180 return ndlp->nlp_state; 1293 return ndlp->nlp_state;
1181} 1294}
1182 1295
1183static uint32_t 1296static uint32_t
1184lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba, 1297lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1185 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1298 void *arg, uint32_t evt)
1186{ 1299{
1187 struct lpfc_iocbq *cmdiocb; 1300 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1188
1189 cmdiocb = (struct lpfc_iocbq *) arg;
1190 1301
1191 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp); 1302 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1192 return ndlp->nlp_state; 1303 return ndlp->nlp_state;
1193} 1304}
1194 1305
1195static uint32_t 1306static uint32_t
1196lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba, 1307lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1197 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1308 void *arg, uint32_t evt)
1198{ 1309{
1199 struct lpfc_iocbq *cmdiocb; 1310 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1200
1201 cmdiocb = (struct lpfc_iocbq *) arg;
1202 1311
1203 /* Software abort outstanding PRLI before sending acc */ 1312 /* Software abort outstanding PRLI before sending acc */
1204 lpfc_els_abort(phba, ndlp); 1313 lpfc_els_abort(vport->phba, ndlp);
1205 1314
1206 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 1315 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1207 return ndlp->nlp_state; 1316 return ndlp->nlp_state;
1208} 1317}
1209 1318
1210static uint32_t 1319static uint32_t
1211lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba, 1320lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1212 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1321 void *arg, uint32_t evt)
1213{ 1322{
1214 struct lpfc_iocbq *cmdiocb; 1323 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1215
1216 cmdiocb = (struct lpfc_iocbq *) arg;
1217 1324
1218 lpfc_rcv_padisc(phba, ndlp, cmdiocb); 1325 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1219 return ndlp->nlp_state; 1326 return ndlp->nlp_state;
1220} 1327}
1221 1328
@@ -1225,21 +1332,22 @@ lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
1225 * NEXT STATE = PRLI_ISSUE 1332 * NEXT STATE = PRLI_ISSUE
1226 */ 1333 */
1227static uint32_t 1334static uint32_t
1228lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba, 1335lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1229 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1336 void *arg, uint32_t evt)
1230{ 1337{
1231 struct lpfc_iocbq *cmdiocb; 1338 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1232 1339
1233 cmdiocb = (struct lpfc_iocbq *) arg; 1340 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1234 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1235 return ndlp->nlp_state; 1341 return ndlp->nlp_state;
1236} 1342}
1237 1343
1238static uint32_t 1344static uint32_t
1239lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba, 1345lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1240 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1346 void *arg, uint32_t evt)
1241{ 1347{
1348 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1242 struct lpfc_iocbq *cmdiocb, *rspiocb; 1349 struct lpfc_iocbq *cmdiocb, *rspiocb;
1350 struct lpfc_hba *phba = vport->phba;
1243 IOCB_t *irsp; 1351 IOCB_t *irsp;
1244 PRLI *npr; 1352 PRLI *npr;
1245 1353
@@ -1249,8 +1357,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1249 1357
1250 irsp = &rspiocb->iocb; 1358 irsp = &rspiocb->iocb;
1251 if (irsp->ulpStatus) { 1359 if (irsp->ulpStatus) {
1360 if ((vport->port_type == LPFC_NPIV_PORT) &&
1361 phba->cfg_vport_restrict_login) {
1362 goto out;
1363 }
1252 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1364 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1253 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1365 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1254 return ndlp->nlp_state; 1366 return ndlp->nlp_state;
1255 } 1367 }
1256 1368
@@ -1266,319 +1378,329 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1266 if (npr->Retry) 1378 if (npr->Retry)
1267 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; 1379 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1268 } 1380 }
1381 if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1382 (vport->port_type == LPFC_NPIV_PORT) &&
1383 phba->cfg_vport_restrict_login) {
1384out:
1385 spin_lock_irq(shost->host_lock);
1386 ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1387 spin_unlock_irq(shost->host_lock);
1388 lpfc_issue_els_logo(vport, ndlp, 0);
1389
1390 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1391 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1392 return ndlp->nlp_state;
1393 }
1269 1394
1270 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1395 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1271 lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE); 1396 if (ndlp->nlp_type & NLP_FCP_TARGET)
1397 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1398 else
1399 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1272 return ndlp->nlp_state; 1400 return ndlp->nlp_state;
1273} 1401}
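Note: the reworked PRLI completion introduces the NPIV restricted-login policy: on a virtual port with cfg_vport_restrict_login set, a node that did not turn out to be an FCP target (or whose PRLI failed) is sent a LOGO and parked in the unused state rather than kept as an unmapped initiator. A compact sketch of just that decision table; the enum and parameter names are illustrative stand-ins, not the driver's flags:

#include <stdbool.h>

enum prli_outcome { KEEP_MAPPED, KEEP_UNMAPPED, LOGO_AND_PARK };

/* Decide what to do with a node once PRLI completes, given whether the
 * port is an NPIV vport with restricted login enabled. */
static enum prli_outcome prli_policy(bool prli_ok, bool is_fcp_target,
                                     bool npiv_port, bool restrict_login)
{
    bool restricted = npiv_port && restrict_login;

    if (!prli_ok)
        return restricted ? LOGO_AND_PARK : KEEP_UNMAPPED;
    if (is_fcp_target)
        return KEEP_MAPPED;
    return restricted ? LOGO_AND_PARK : KEEP_UNMAPPED;
}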
1274 1402
1275/*! lpfc_device_rm_prli_issue 1403/*! lpfc_device_rm_prli_issue
1276 * 1404 *
1277 * \pre 1405 * \pre
1278 * \post 1406 * \post
1279 * \param phba 1407 * \param phba
1280 * \param ndlp 1408 * \param ndlp
1281 * \param arg 1409 * \param arg
1282 * \param evt 1410 * \param evt
1283 * \return uint32_t 1411 * \return uint32_t
1284 * 1412 *
1285 * \b Description: 1413 * \b Description:
1286 * This routine is invoked when we receive a request to remove an nport we are in the 1414 * This routine is invoked when we receive a request to remove an nport we are in the
1287 * process of PRLIing. We should software abort outstanding prli, unreg 1415 * process of PRLIing. We should software abort outstanding prli, unreg
1288 * login, send a logout. We will change node state to UNUSED_NODE, put it 1416 * login, send a logout. We will change node state to UNUSED_NODE, put it
1289 * on plogi list so it can be freed when LOGO completes. 1417 * on plogi list so it can be freed when LOGO completes.
1290 * 1418 *
1291 */ 1419 */
1420
1292static uint32_t 1421static uint32_t
1293lpfc_device_rm_prli_issue(struct lpfc_hba * phba, 1422lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1294 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1423 void *arg, uint32_t evt)
1295{ 1424{
1296 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1425 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1426
1427 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1428 spin_lock_irq(shost->host_lock);
1297 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1429 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1430 spin_unlock_irq(shost->host_lock);
1298 return ndlp->nlp_state; 1431 return ndlp->nlp_state;
1299 } 1432 } else {
1300 else {
1301 /* software abort outstanding PLOGI */ 1433 /* software abort outstanding PLOGI */
1302 lpfc_els_abort(phba, ndlp); 1434 lpfc_els_abort(vport->phba, ndlp);
1303 1435
1304 lpfc_drop_node(phba, ndlp); 1436 lpfc_drop_node(vport, ndlp);
1305 return NLP_STE_FREED_NODE; 1437 return NLP_STE_FREED_NODE;
1306 } 1438 }
1307} 1439}
1308 1440
1309 1441
1310/*! lpfc_device_recov_prli_issue 1442/*! lpfc_device_recov_prli_issue
1311 * 1443 *
1312 * \pre 1444 * \pre
1313 * \post 1445 * \post
1314 * \param phba 1446 * \param phba
1315 * \param ndlp 1447 * \param ndlp
1316 * \param arg 1448 * \param arg
1317 * \param evt 1449 * \param evt
1318 * \return uint32_t 1450 * \return uint32_t
1319 * 1451 *
1320 * \b Description: 1452 * \b Description:
1321 * The routine is invoked when the state of a device is unknown, like 1453 * The routine is invoked when the state of a device is unknown, like
1322 * during a link down. We should remove the nodelist entry from the 1454 * during a link down. We should remove the nodelist entry from the
1323 * unmapped list, issue a UNREG_LOGIN, do a software abort of the 1455 * unmapped list, issue a UNREG_LOGIN, do a software abort of the
1324 * outstanding PRLI command, then free the node entry. 1456 * outstanding PRLI command, then free the node entry.
1325 */ 1457 */
1326static uint32_t 1458static uint32_t
1327lpfc_device_recov_prli_issue(struct lpfc_hba * phba, 1459lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1328 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1460 struct lpfc_nodelist *ndlp,
1461 void *arg,
1462 uint32_t evt)
1329{ 1463{
1464 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1465 struct lpfc_hba *phba = vport->phba;
1466
1467 /* Don't do anything that will mess up processing of the
1468 * previous RSCN.
1469 */
1470 if (vport->fc_flag & FC_RSCN_DEFERRED)
1471 return ndlp->nlp_state;
1472
1330 /* software abort outstanding PRLI */ 1473 /* software abort outstanding PRLI */
1331 lpfc_els_abort(phba, ndlp); 1474 lpfc_els_abort(phba, ndlp);
1332 1475
1333 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1476 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1334 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1477 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1335 spin_lock_irq(phba->host->host_lock); 1478 spin_lock_irq(shost->host_lock);
1336 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1479 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1337 spin_unlock_irq(phba->host->host_lock); 1480 spin_unlock_irq(shost->host_lock);
1481 lpfc_disc_set_adisc(vport, ndlp);
1338 return ndlp->nlp_state; 1482 return ndlp->nlp_state;
1339} 1483}
1340 1484
1341static uint32_t 1485static uint32_t
1342lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba, 1486lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1343 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1487 void *arg, uint32_t evt)
1344{ 1488{
1345 struct lpfc_iocbq *cmdiocb; 1489 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1346
1347 cmdiocb = (struct lpfc_iocbq *) arg;
1348 1490
1349 lpfc_rcv_plogi(phba, ndlp, cmdiocb); 1491 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1350 return ndlp->nlp_state; 1492 return ndlp->nlp_state;
1351} 1493}
1352 1494
1353static uint32_t 1495static uint32_t
1354lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba, 1496lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1355 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1497 void *arg, uint32_t evt)
1356{ 1498{
1357 struct lpfc_iocbq *cmdiocb; 1499 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1358 1500
1359 cmdiocb = (struct lpfc_iocbq *) arg; 1501 lpfc_rcv_prli(vport, ndlp, cmdiocb);
1360 1502 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1361 lpfc_rcv_prli(phba, ndlp, cmdiocb);
1362 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1363 return ndlp->nlp_state; 1503 return ndlp->nlp_state;
1364} 1504}
1365 1505
1366static uint32_t 1506static uint32_t
1367lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba, 1507lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1368 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1508 void *arg, uint32_t evt)
1369{ 1509{
1370 struct lpfc_iocbq *cmdiocb; 1510 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1371 1511
1372 cmdiocb = (struct lpfc_iocbq *) arg; 1512 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1373
1374 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1375 return ndlp->nlp_state; 1513 return ndlp->nlp_state;
1376} 1514}
1377 1515
1378static uint32_t 1516static uint32_t
1379lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba, 1517lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1380 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1518 void *arg, uint32_t evt)
1381{ 1519{
1382 struct lpfc_iocbq *cmdiocb; 1520 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1383 1521
1384 cmdiocb = (struct lpfc_iocbq *) arg; 1522 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1385
1386 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1387 return ndlp->nlp_state; 1523 return ndlp->nlp_state;
1388} 1524}
1389 1525
1390static uint32_t 1526static uint32_t
1391lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba, 1527lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1392 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1528 void *arg, uint32_t evt)
1393{ 1529{
1394 struct lpfc_iocbq *cmdiocb; 1530 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1395
1396 cmdiocb = (struct lpfc_iocbq *) arg;
1397 1531
1398 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); 1532 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1399 return ndlp->nlp_state; 1533 return ndlp->nlp_state;
1400} 1534}
1401 1535
1402static uint32_t 1536static uint32_t
1403lpfc_device_recov_unmap_node(struct lpfc_hba * phba, 1537lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
1404 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1538 struct lpfc_nodelist *ndlp,
1539 void *arg,
1540 uint32_t evt)
1405{ 1541{
1542 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1543
1406 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; 1544 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1407 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1545 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1546 spin_lock_irq(shost->host_lock);
1408 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1547 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1409 lpfc_disc_set_adisc(phba, ndlp); 1548 spin_unlock_irq(shost->host_lock);
1549 lpfc_disc_set_adisc(vport, ndlp);
1410 1550
1411 return ndlp->nlp_state; 1551 return ndlp->nlp_state;
1412} 1552}
1413 1553
1414static uint32_t 1554static uint32_t
1415lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba, 1555lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1416 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1556 void *arg, uint32_t evt)
1417{ 1557{
1418 struct lpfc_iocbq *cmdiocb; 1558 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1419
1420 cmdiocb = (struct lpfc_iocbq *) arg;
1421 1559
1422 lpfc_rcv_plogi(phba, ndlp, cmdiocb); 1560 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1423 return ndlp->nlp_state; 1561 return ndlp->nlp_state;
1424} 1562}
1425 1563
1426static uint32_t 1564static uint32_t
1427lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba, 1565lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1428 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1566 void *arg, uint32_t evt)
1429{ 1567{
1430 struct lpfc_iocbq *cmdiocb; 1568 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1431 1569
1432 cmdiocb = (struct lpfc_iocbq *) arg; 1570 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1433
1434 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1435 return ndlp->nlp_state; 1571 return ndlp->nlp_state;
1436} 1572}
1437 1573
1438static uint32_t 1574static uint32_t
1439lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba, 1575lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1440 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1576 void *arg, uint32_t evt)
1441{ 1577{
1442 struct lpfc_iocbq *cmdiocb; 1578 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1443 1579
1444 cmdiocb = (struct lpfc_iocbq *) arg; 1580 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1445
1446 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1447 return ndlp->nlp_state; 1581 return ndlp->nlp_state;
1448} 1582}
1449 1583
1450static uint32_t 1584static uint32_t
1451lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba, 1585lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
1452 struct lpfc_nodelist * ndlp, void *arg, 1586 struct lpfc_nodelist *ndlp,
1453 uint32_t evt) 1587 void *arg, uint32_t evt)
1454{ 1588{
1455 struct lpfc_iocbq *cmdiocb; 1589 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1456
1457 cmdiocb = (struct lpfc_iocbq *) arg;
1458 1590
1459 lpfc_rcv_padisc(phba, ndlp, cmdiocb); 1591 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1460 return ndlp->nlp_state; 1592 return ndlp->nlp_state;
1461} 1593}
1462 1594
1463static uint32_t 1595static uint32_t
1464lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba, 1596lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1465 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1597 void *arg, uint32_t evt)
1466{ 1598{
1467 struct lpfc_iocbq *cmdiocb; 1599 struct lpfc_hba *phba = vport->phba;
1468 1600 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1469 cmdiocb = (struct lpfc_iocbq *) arg;
1470 1601
1471 /* flush the target */ 1602 /* flush the target */
1472 spin_lock_irq(phba->host->host_lock);
1473 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 1603 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1474 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 1604 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
1475 spin_unlock_irq(phba->host->host_lock);
1476 1605
1477 /* Treat like rcv logo */ 1606 /* Treat like rcv logo */
1478 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); 1607 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1479 return ndlp->nlp_state; 1608 return ndlp->nlp_state;
1480} 1609}
1481 1610
1482static uint32_t 1611static uint32_t
1483lpfc_device_recov_mapped_node(struct lpfc_hba * phba, 1612lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
1484 struct lpfc_nodelist * ndlp, void *arg, 1613 struct lpfc_nodelist *ndlp,
1485 uint32_t evt) 1614 void *arg,
1615 uint32_t evt)
1486{ 1616{
1617 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1618
1487 ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; 1619 ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
1488 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1620 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1489 spin_lock_irq(phba->host->host_lock); 1621 spin_lock_irq(shost->host_lock);
1490 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1622 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1491 spin_unlock_irq(phba->host->host_lock); 1623 spin_unlock_irq(shost->host_lock);
1492 lpfc_disc_set_adisc(phba, ndlp); 1624 lpfc_disc_set_adisc(vport, ndlp);
1493 return ndlp->nlp_state; 1625 return ndlp->nlp_state;
1494} 1626}
1495 1627
1496static uint32_t 1628static uint32_t
1497lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba, 1629lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1498 struct lpfc_nodelist * ndlp, void *arg, 1630 void *arg, uint32_t evt)
1499 uint32_t evt)
1500{ 1631{
1501 struct lpfc_iocbq *cmdiocb; 1632 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1502 1633 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1503 cmdiocb = (struct lpfc_iocbq *) arg;
1504 1634
1505 /* Ignore PLOGI if we have an outstanding LOGO */ 1635 /* Ignore PLOGI if we have an outstanding LOGO */
1506 if (ndlp->nlp_flag & NLP_LOGO_SND) { 1636 if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
1507 return ndlp->nlp_state; 1637 return ndlp->nlp_state;
1508 } 1638 }
1509 1639
1510 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) { 1640 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1511 spin_lock_irq(phba->host->host_lock); 1641 spin_lock_irq(shost->host_lock);
1512 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1642 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1513 spin_unlock_irq(phba->host->host_lock); 1643 spin_unlock_irq(shost->host_lock);
1514 return ndlp->nlp_state; 1644 return ndlp->nlp_state;
1515 } 1645 }
1516 1646
1517 /* send PLOGI immediately, move to PLOGI issue state */ 1647 /* send PLOGI immediately, move to PLOGI issue state */
1518 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1648 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1519 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1649 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1520 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 1650 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1521 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 1651 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1522 } 1652 }
1523 1653
1524 return ndlp->nlp_state; 1654 return ndlp->nlp_state;
1525} 1655}
1526 1656
1527static uint32_t 1657static uint32_t
1528lpfc_rcv_prli_npr_node(struct lpfc_hba * phba, 1658lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1529 struct lpfc_nodelist * ndlp, void *arg, 1659 void *arg, uint32_t evt)
1530 uint32_t evt)
1531{ 1660{
1532 struct lpfc_iocbq *cmdiocb; 1661 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1533 struct ls_rjt stat; 1662 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1534 1663 struct ls_rjt stat;
1535 cmdiocb = (struct lpfc_iocbq *) arg;
1536 1664
1537 memset(&stat, 0, sizeof (struct ls_rjt)); 1665 memset(&stat, 0, sizeof (struct ls_rjt));
1538 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 1666 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1539 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 1667 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1540 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 1668 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1541 1669
1542 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1670 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1543 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1671 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1544 spin_lock_irq(phba->host->host_lock); 1672 spin_lock_irq(shost->host_lock);
1545 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1673 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1546 spin_unlock_irq(phba->host->host_lock);
1547 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1674 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1548 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); 1675 spin_unlock_irq(shost->host_lock);
1549 lpfc_issue_els_adisc(phba, ndlp, 0); 1676 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1677 lpfc_issue_els_adisc(vport, ndlp, 0);
1550 } else { 1678 } else {
1551 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1679 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1552 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 1680 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1553 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 1681 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1554 } 1682 }
1555 } 1683 }
1556 return ndlp->nlp_state; 1684 return ndlp->nlp_state;
1557} 1685}
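Note: a PRLI arriving while the node sits in NPR state is rejected with LS_RJT (LSRJT_UNABLE_TPC / LSEXP_NOTHING_MORE) and the driver then restarts login on its own terms: nothing if a delayed retry is already armed, ADISC if the old registration can still be revalidated, a full PLOGI otherwise. A tiny sketch of that choice, with invented names for the three outcomes:

#include <stdbool.h>

enum relogin { WAIT_DELAY_TIMER, START_ADISC, START_PLOGI };

/* After rejecting the unexpected PRLI, pick how to restart login. */
static enum relogin npr_relogin_choice(bool delay_timer_armed, bool adisc_ok)
{
    if (delay_timer_armed)
        return WAIT_DELAY_TIMER;
    return adisc_ok ? START_ADISC : START_PLOGI;
}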
1558 1686
1559static uint32_t 1687static uint32_t
1560lpfc_rcv_logo_npr_node(struct lpfc_hba * phba, 1688lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1561 struct lpfc_nodelist * ndlp, void *arg, 1689 void *arg, uint32_t evt)
1562 uint32_t evt)
1563{ 1690{
1564 struct lpfc_iocbq *cmdiocb; 1691 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1565 1692
1566 cmdiocb = (struct lpfc_iocbq *) arg; 1693 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1567
1568 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1569 return ndlp->nlp_state; 1694 return ndlp->nlp_state;
1570} 1695}
1571 1696
1572static uint32_t 1697static uint32_t
1573lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, 1698lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1574 struct lpfc_nodelist * ndlp, void *arg, 1699 void *arg, uint32_t evt)
1575 uint32_t evt)
1576{ 1700{
1577 struct lpfc_iocbq *cmdiocb; 1701 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1578
1579 cmdiocb = (struct lpfc_iocbq *) arg;
1580 1702
1581 lpfc_rcv_padisc(phba, ndlp, cmdiocb); 1703 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1582 1704
1583 /* 1705 /*
1584 * Do not start discovery if discovery is about to start 1706 * Do not start discovery if discovery is about to start
@@ -1586,53 +1708,52 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1586 * here will affect the counting of discovery threads. 1708 * here will affect the counting of discovery threads.
1587 */ 1709 */
1588 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && 1710 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
1589 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){ 1711 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1590 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1712 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1713 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1591 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1714 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1592 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); 1715 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1593 lpfc_issue_els_adisc(phba, ndlp, 0); 1716 lpfc_issue_els_adisc(vport, ndlp, 0);
1594 } else { 1717 } else {
1595 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1718 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1596 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 1719 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1597 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 1720 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1598 } 1721 }
1599 } 1722 }
1600 return ndlp->nlp_state; 1723 return ndlp->nlp_state;
1601} 1724}
1602 1725
1603static uint32_t 1726static uint32_t
1604lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba, 1727lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1605 struct lpfc_nodelist * ndlp, void *arg, 1728 void *arg, uint32_t evt)
1606 uint32_t evt)
1607{ 1729{
1608 struct lpfc_iocbq *cmdiocb; 1730 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1609 1731 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1610 cmdiocb = (struct lpfc_iocbq *) arg;
1611 1732
1612 spin_lock_irq(phba->host->host_lock); 1733 spin_lock_irq(shost->host_lock);
1613 ndlp->nlp_flag |= NLP_LOGO_ACC; 1734 ndlp->nlp_flag |= NLP_LOGO_ACC;
1614 spin_unlock_irq(phba->host->host_lock); 1735 spin_unlock_irq(shost->host_lock);
1615 1736
1616 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 1737 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1617 1738
1618 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1739 if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
1619 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1740 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1620 spin_lock_irq(phba->host->host_lock); 1741 spin_lock_irq(shost->host_lock);
1621 ndlp->nlp_flag |= NLP_DELAY_TMO; 1742 ndlp->nlp_flag |= NLP_DELAY_TMO;
1622 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1743 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1623 spin_unlock_irq(phba->host->host_lock); 1744 spin_unlock_irq(shost->host_lock);
1624 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1745 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1625 } else { 1746 } else {
1626 spin_lock_irq(phba->host->host_lock); 1747 spin_lock_irq(shost->host_lock);
1627 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1748 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1628 spin_unlock_irq(phba->host->host_lock); 1749 spin_unlock_irq(shost->host_lock);
1629 } 1750 }
1630 return ndlp->nlp_state; 1751 return ndlp->nlp_state;
1631} 1752}
1632 1753
1633static uint32_t 1754static uint32_t
1634lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba, 1755lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1635 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1756 void *arg, uint32_t evt)
1636{ 1757{
1637 struct lpfc_iocbq *cmdiocb, *rspiocb; 1758 struct lpfc_iocbq *cmdiocb, *rspiocb;
1638 IOCB_t *irsp; 1759 IOCB_t *irsp;
@@ -1642,15 +1763,15 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
1642 1763
1643 irsp = &rspiocb->iocb; 1764 irsp = &rspiocb->iocb;
1644 if (irsp->ulpStatus) { 1765 if (irsp->ulpStatus) {
1645 lpfc_drop_node(phba, ndlp); 1766 lpfc_drop_node(vport, ndlp);
1646 return NLP_STE_FREED_NODE; 1767 return NLP_STE_FREED_NODE;
1647 } 1768 }
1648 return ndlp->nlp_state; 1769 return ndlp->nlp_state;
1649} 1770}
1650 1771
1651static uint32_t 1772static uint32_t
1652lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba, 1773lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1653 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1774 void *arg, uint32_t evt)
1654{ 1775{
1655 struct lpfc_iocbq *cmdiocb, *rspiocb; 1776 struct lpfc_iocbq *cmdiocb, *rspiocb;
1656 IOCB_t *irsp; 1777 IOCB_t *irsp;
@@ -1660,25 +1781,24 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
1660 1781
1661 irsp = &rspiocb->iocb; 1782 irsp = &rspiocb->iocb;
1662 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { 1783 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1663 lpfc_drop_node(phba, ndlp); 1784 lpfc_drop_node(vport, ndlp);
1664 return NLP_STE_FREED_NODE; 1785 return NLP_STE_FREED_NODE;
1665 } 1786 }
1666 return ndlp->nlp_state; 1787 return ndlp->nlp_state;
1667} 1788}
1668 1789
1669static uint32_t 1790static uint32_t
1670lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba, 1791lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1671 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1792 void *arg, uint32_t evt)
1672{ 1793{
1673 lpfc_unreg_rpi(phba, ndlp); 1794 lpfc_unreg_rpi(vport, ndlp);
1674 /* This routine does nothing, just return the current state */ 1795 /* This routine does nothing, just return the current state */
1675 return ndlp->nlp_state; 1796 return ndlp->nlp_state;
1676} 1797}
1677 1798
1678static uint32_t 1799static uint32_t
1679lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba, 1800lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1680 struct lpfc_nodelist * ndlp, void *arg, 1801 void *arg, uint32_t evt)
1681 uint32_t evt)
1682{ 1802{
1683 struct lpfc_iocbq *cmdiocb, *rspiocb; 1803 struct lpfc_iocbq *cmdiocb, *rspiocb;
1684 IOCB_t *irsp; 1804 IOCB_t *irsp;
@@ -1688,28 +1808,25 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
1688 1808
1689 irsp = &rspiocb->iocb; 1809 irsp = &rspiocb->iocb;
1690 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { 1810 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1691 lpfc_drop_node(phba, ndlp); 1811 lpfc_drop_node(vport, ndlp);
1692 return NLP_STE_FREED_NODE; 1812 return NLP_STE_FREED_NODE;
1693 } 1813 }
1694 return ndlp->nlp_state; 1814 return ndlp->nlp_state;
1695} 1815}
1696 1816
1697static uint32_t 1817static uint32_t
1698lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba, 1818lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1699 struct lpfc_nodelist * ndlp, void *arg, 1819 struct lpfc_nodelist *ndlp,
1700 uint32_t evt) 1820 void *arg, uint32_t evt)
1701{ 1821{
1702 LPFC_MBOXQ_t *pmb; 1822 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1703 MAILBOX_t *mb; 1823 MAILBOX_t *mb = &pmb->mb;
1704
1705 pmb = (LPFC_MBOXQ_t *) arg;
1706 mb = &pmb->mb;
1707 1824
1708 if (!mb->mbxStatus) 1825 if (!mb->mbxStatus)
1709 ndlp->nlp_rpi = mb->un.varWords[0]; 1826 ndlp->nlp_rpi = mb->un.varWords[0];
1710 else { 1827 else {
1711 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 1828 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1712 lpfc_drop_node(phba, ndlp); 1829 lpfc_drop_node(vport, ndlp);
1713 return NLP_STE_FREED_NODE; 1830 return NLP_STE_FREED_NODE;
1714 } 1831 }
1715 } 1832 }
@@ -1717,28 +1834,38 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1717} 1834}
1718 1835
1719static uint32_t 1836static uint32_t
1720lpfc_device_rm_npr_node(struct lpfc_hba * phba, 1837lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1721 struct lpfc_nodelist * ndlp, void *arg, 1838 void *arg, uint32_t evt)
1722 uint32_t evt)
1723{ 1839{
1840 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1841
1724 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1842 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1843 spin_lock_irq(shost->host_lock);
1725 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1844 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1845 spin_unlock_irq(shost->host_lock);
1726 return ndlp->nlp_state; 1846 return ndlp->nlp_state;
1727 } 1847 }
1728 lpfc_drop_node(phba, ndlp); 1848 lpfc_drop_node(vport, ndlp);
1729 return NLP_STE_FREED_NODE; 1849 return NLP_STE_FREED_NODE;
1730} 1850}
1731 1851
1732static uint32_t 1852static uint32_t
1733lpfc_device_recov_npr_node(struct lpfc_hba * phba, 1853lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1734 struct lpfc_nodelist * ndlp, void *arg, 1854 void *arg, uint32_t evt)
1735 uint32_t evt)
1736{ 1855{
1737 spin_lock_irq(phba->host->host_lock); 1856 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1857
1858 /* Don't do anything that will mess up processing of the
1859 * previous RSCN.
1860 */
1861 if (vport->fc_flag & FC_RSCN_DEFERRED)
1862 return ndlp->nlp_state;
1863
1864 spin_lock_irq(shost->host_lock);
1738 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1865 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1739 spin_unlock_irq(phba->host->host_lock); 1866 spin_unlock_irq(shost->host_lock);
1740 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 1867 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1741 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1868 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1742 } 1869 }
1743 return ndlp->nlp_state; 1870 return ndlp->nlp_state;
1744} 1871}
@@ -1801,7 +1928,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1801 */ 1928 */
1802 1929
1803static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) 1930static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1804 (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = { 1931 (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
1805 /* Action routine Event Current State */ 1932 /* Action routine Event Current State */
1806 lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */ 1933 lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
1807 lpfc_rcv_els_unused_node, /* RCV_PRLI */ 1934 lpfc_rcv_els_unused_node, /* RCV_PRLI */
@@ -1818,7 +1945,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1818 lpfc_disc_illegal, /* DEVICE_RECOVERY */ 1945 lpfc_disc_illegal, /* DEVICE_RECOVERY */
1819 1946
1820 lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */ 1947 lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
1821 lpfc_rcv_els_plogi_issue, /* RCV_PRLI */ 1948 lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
1822 lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */ 1949 lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */
1823 lpfc_rcv_els_plogi_issue, /* RCV_ADISC */ 1950 lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
1824 lpfc_rcv_els_plogi_issue, /* RCV_PDISC */ 1951 lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
@@ -1917,35 +2044,41 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1917}; 2044};
1918 2045
1919int 2046int
1920lpfc_disc_state_machine(struct lpfc_hba * phba, 2047lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1921 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 2048 void *arg, uint32_t evt)
1922{ 2049{
2050 struct lpfc_hba *phba = vport->phba;
1923 uint32_t cur_state, rc; 2051 uint32_t cur_state, rc;
1924 uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *, 2052 uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
1925 uint32_t); 2053 uint32_t);
1926 2054
1927 lpfc_nlp_get(ndlp); 2055 lpfc_nlp_get(ndlp);
1928 cur_state = ndlp->nlp_state; 2056 cur_state = ndlp->nlp_state;
1929 2057
1930 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ 2058 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
1931 lpfc_printf_log(phba, 2059 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1932 KERN_INFO, 2060 "%d (%d):0211 DSM in event x%x on NPort x%x in "
1933 LOG_DISCOVERY, 2061 "state %d Data: x%x\n",
1934 "%d:0211 DSM in event x%x on NPort x%x in state %d " 2062 phba->brd_no, vport->vpi,
1935 "Data: x%x\n",
1936 phba->brd_no,
1937 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag); 2063 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
1938 2064
2065 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2066 "DSM in: evt:%d ste:%d did:x%x",
2067 evt, cur_state, ndlp->nlp_DID);
2068
1939 func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt]; 2069 func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
1940 rc = (func) (phba, ndlp, arg, evt); 2070 rc = (func) (vport, ndlp, arg, evt);
1941 2071
1942 /* DSM out state <rc> on NPort <nlp_DID> */ 2072 /* DSM out state <rc> on NPort <nlp_DID> */
1943 lpfc_printf_log(phba, 2073 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1944 KERN_INFO, 2074 "%d (%d):0212 DSM out state %d on NPort x%x "
1945 LOG_DISCOVERY, 2075 "Data: x%x\n",
1946 "%d:0212 DSM out state %d on NPort x%x Data: x%x\n", 2076 phba->brd_no, vport->vpi,
1947 phba->brd_no, 2077 rc, ndlp->nlp_DID, ndlp->nlp_flag);
1948 rc, ndlp->nlp_DID, ndlp->nlp_flag); 2078
2079 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2080 "DSM out: ste:%d did:x%x flg:x%x",
2081 rc, ndlp->nlp_DID, ndlp->nlp_flag);
1949 2082
1950 lpfc_nlp_put(ndlp); 2083 lpfc_nlp_put(ndlp);
1951 2084
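Note: lpfc_disc_state_machine() dispatches through lpfc_disc_action[], a one-dimensional array that flattens the (state, event) pair with the index (cur_state * NLP_EVT_MAX_EVENT) + evt; the change in this diff is only that every handler now takes the vport instead of the hba. A standalone sketch of that flattened dispatch-table idiom, with toy states, events and handlers rather than the driver's tables:

#include <stdint.h>
#include <stdio.h>

#define N_STATES 2
#define N_EVENTS 3

struct ctx { const char *name; };

typedef uint32_t (*action_fn)(struct ctx *c, uint32_t evt);

static uint32_t ignore(struct ctx *c, uint32_t evt)
{
    printf("%s: ignoring event %u\n", c->name, (unsigned)evt);
    return 0;
}

static uint32_t handle(struct ctx *c, uint32_t evt)
{
    printf("%s: handling event %u\n", c->name, (unsigned)evt);
    return 1;
}

/* Row-major (state * N_EVENTS + event) table, like lpfc_disc_action[]. */
static action_fn actions[N_STATES * N_EVENTS] = {
    /* state 0 */ handle, ignore, ignore,
    /* state 1 */ ignore, handle, handle,
};

static uint32_t dispatch(struct ctx *c, uint32_t state, uint32_t evt)
{
    return actions[state * N_EVENTS + evt](c, evt);
}

int main(void)
{
    struct ctx c = { "node" };
    return (int)dispatch(&c, 1, 2);
}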
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9a12d05e99e4..8f45bbc42126 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -37,10 +37,158 @@
37#include "lpfc.h" 37#include "lpfc.h"
38#include "lpfc_logmsg.h" 38#include "lpfc_logmsg.h"
39#include "lpfc_crtn.h" 39#include "lpfc_crtn.h"
40#include "lpfc_vport.h"
40 41
41#define LPFC_RESET_WAIT 2 42#define LPFC_RESET_WAIT 2
42#define LPFC_ABORT_WAIT 2 43#define LPFC_ABORT_WAIT 2
43 44
45/*
46 * This function is called with no lock held when there is a resource
47 * error in driver or in firmware.
48 */
49void
50lpfc_adjust_queue_depth(struct lpfc_hba *phba)
51{
52 unsigned long flags;
53
54 spin_lock_irqsave(&phba->hbalock, flags);
55 atomic_inc(&phba->num_rsrc_err);
56 phba->last_rsrc_error_time = jiffies;
57
58 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
59 spin_unlock_irqrestore(&phba->hbalock, flags);
60 return;
61 }
62
63 phba->last_ramp_down_time = jiffies;
64
65 spin_unlock_irqrestore(&phba->hbalock, flags);
66
67 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
68 if ((phba->pport->work_port_events &
69 WORKER_RAMP_DOWN_QUEUE) == 0) {
70 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
71 }
72 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
73
74 spin_lock_irqsave(&phba->hbalock, flags);
75 if (phba->work_wait)
76 wake_up(phba->work_wait);
77 spin_unlock_irqrestore(&phba->hbalock, flags);
78
79 return;
80}
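Note: lpfc_adjust_queue_depth() is a throttle: every resource error is counted and timestamped, but the WORKER_RAMP_DOWN_QUEUE flag is set and the worker thread woken only if QUEUE_RAMP_DOWN_INTERVAL has elapsed since the last ramp-down, so a burst of errors collapses into a single deferred queue adjustment. A minimal userspace sketch of that "act at most once per interval" gate, with time(2) standing in for jiffies:

#include <stdbool.h>
#include <time.h>

#define RAMP_DOWN_INTERVAL 120   /* seconds; stands in for the jiffies interval */

struct throttle {
    unsigned long num_errors;
    time_t last_error_time;
    time_t last_action_time;
};

/* Record an error; return true only when enough time has passed that the
 * caller should flag the deferred ramp-down work and wake the worker. */
static bool note_error_and_should_act(struct throttle *t)
{
    time_t now = time(NULL);

    t->num_errors++;
    t->last_error_time = now;

    if (t->last_action_time + RAMP_DOWN_INTERVAL > now)
        return false;            /* too soon: just accumulate */

    t->last_action_time = now;
    return true;
}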
81
82/*
83 * This function is called with no lock held when there is a successful
84 * SCSI command completion.
85 */
86static inline void
87lpfc_rampup_queue_depth(struct lpfc_hba *phba,
88 struct scsi_device *sdev)
89{
90 unsigned long flags;
91 atomic_inc(&phba->num_cmd_success);
92
93 if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
94 return;
95
96 spin_lock_irqsave(&phba->hbalock, flags);
97 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
98 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
99 spin_unlock_irqrestore(&phba->hbalock, flags);
100 return;
101 }
102
103 phba->last_ramp_up_time = jiffies;
104 spin_unlock_irqrestore(&phba->hbalock, flags);
105
106 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
107 if ((phba->pport->work_port_events &
108 WORKER_RAMP_UP_QUEUE) == 0) {
109 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
110 }
111 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
112
113 spin_lock_irqsave(&phba->hbalock, flags);
114 if (phba->work_wait)
115 wake_up(phba->work_wait);
116 spin_unlock_irqrestore(&phba->hbalock, flags);
117}
118
119void
120lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
121{
122 struct lpfc_vport *vport;
123 struct Scsi_Host *host;
124 struct scsi_device *sdev;
125 unsigned long new_queue_depth;
126 unsigned long num_rsrc_err, num_cmd_success;
127
128 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
129 num_cmd_success = atomic_read(&phba->num_cmd_success);
130
131 spin_lock_irq(&phba->hbalock);
132 list_for_each_entry(vport, &phba->port_list, listentry) {
133 host = lpfc_shost_from_vport(vport);
134 if (!scsi_host_get(host))
135 continue;
136
137 spin_unlock_irq(&phba->hbalock);
138
139 shost_for_each_device(sdev, host) {
140 new_queue_depth = sdev->queue_depth * num_rsrc_err /
141 (num_rsrc_err + num_cmd_success);
142 if (!new_queue_depth)
143 new_queue_depth = sdev->queue_depth - 1;
144 else
145 new_queue_depth =
146 sdev->queue_depth - new_queue_depth;
147
148 if (sdev->ordered_tags)
149 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
150 new_queue_depth);
151 else
152 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
153 new_queue_depth);
154 }
155 spin_lock_irq(&phba->hbalock);
156 scsi_host_put(host);
157 }
158 spin_unlock_irq(&phba->hbalock);
159 atomic_set(&phba->num_rsrc_err, 0);
160 atomic_set(&phba->num_cmd_success, 0);
161}
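Note: the ramp-down worker scales each device's queue depth in proportion to how many commands hit resource errors versus how many completed since the last adjustment: it subtracts depth * err / (err + ok) and falls back to depth - 1 when that integer division rounds to zero. A standalone sketch of just the arithmetic with a worked example (depth 30, 20 errors, 80 successes trims to 24); like the driver path, it assumes at least one error has been counted so the divisor is non-zero:

#include <stdio.h>

/* Same integer math as lpfc_ramp_down_queue_handler() above. */
static unsigned long ramp_down_depth(unsigned long depth,
                                     unsigned long num_err,
                                     unsigned long num_ok)
{
    unsigned long cut = depth * num_err / (num_err + num_ok);

    return cut ? depth - cut : depth - 1;
}

int main(void)
{
    /* 20 resource errors against 80 successes: 30 - 30*20/100 = 24 */
    printf("%lu\n", ramp_down_depth(30, 20, 80));
    return 0;
}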
162
163void
164lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
165{
166 struct lpfc_vport *vport;
167 struct Scsi_Host *host;
168 struct scsi_device *sdev;
169
170 spin_lock_irq(&phba->hbalock);
171 list_for_each_entry(vport, &phba->port_list, listentry) {
172 host = lpfc_shost_from_vport(vport);
173 if (!scsi_host_get(host))
174 continue;
175
176 spin_unlock_irq(&phba->hbalock);
177 shost_for_each_device(sdev, host) {
178 if (sdev->ordered_tags)
179 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
180 sdev->queue_depth+1);
181 else
182 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
183 sdev->queue_depth+1);
184 }
185 spin_lock_irq(&phba->hbalock);
186 scsi_host_put(host);
187 }
188 spin_unlock_irq(&phba->hbalock);
189 atomic_set(&phba->num_rsrc_err, 0);
190 atomic_set(&phba->num_cmd_success, 0);
191}
44 192
45/* 193/*
46 * This routine allocates a scsi buffer, which contains all the necessary 194 * This routine allocates a scsi buffer, which contains all the necessary
@@ -51,8 +199,9 @@
51 * and the BPL BDE is setup in the IOCB. 199 * and the BPL BDE is setup in the IOCB.
52 */ 200 */
53static struct lpfc_scsi_buf * 201static struct lpfc_scsi_buf *
54lpfc_new_scsi_buf(struct lpfc_hba * phba) 202lpfc_new_scsi_buf(struct lpfc_vport *vport)
55{ 203{
204 struct lpfc_hba *phba = vport->phba;
56 struct lpfc_scsi_buf *psb; 205 struct lpfc_scsi_buf *psb;
57 struct ulp_bde64 *bpl; 206 struct ulp_bde64 *bpl;
58 IOCB_t *iocb; 207 IOCB_t *iocb;
@@ -63,7 +212,6 @@ lpfc_new_scsi_buf(struct lpfc_hba * phba)
63 if (!psb) 212 if (!psb)
64 return NULL; 213 return NULL;
65 memset(psb, 0, sizeof (struct lpfc_scsi_buf)); 214 memset(psb, 0, sizeof (struct lpfc_scsi_buf));
66 psb->scsi_hba = phba;
67 215
68 /* 216 /*
69 * Get memory from the pci pool to map the virt space to pci bus space 217 * Get memory from the pci pool to map the virt space to pci bus space
@@ -155,7 +303,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
155} 303}
156 304
157static void 305static void
158lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 306lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
159{ 307{
160 unsigned long iflag = 0; 308 unsigned long iflag = 0;
161 309
@@ -166,7 +314,7 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
166} 314}
167 315
168static int 316static int
169lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) 317lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
170{ 318{
171 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 319 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
172 struct scatterlist *sgel = NULL; 320 struct scatterlist *sgel = NULL;
@@ -175,8 +323,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
175 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 323 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
176 dma_addr_t physaddr; 324 dma_addr_t physaddr;
177 uint32_t i, num_bde = 0; 325 uint32_t i, num_bde = 0;
178 int datadir = scsi_cmnd->sc_data_direction; 326 int nseg, datadir = scsi_cmnd->sc_data_direction;
179 int dma_error;
180 327
181 /* 328 /*
182 * There are three possibilities here - use scatter-gather segment, use 329 * There are three possibilities here - use scatter-gather segment, use
@@ -185,26 +332,26 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
185 * data bde entry. 332 * data bde entry.
186 */ 333 */
187 bpl += 2; 334 bpl += 2;
188 if (scsi_cmnd->use_sg) { 335 if (scsi_sg_count(scsi_cmnd)) {
189 /* 336 /*
190 * The driver stores the segment count returned from pci_map_sg 337 * The driver stores the segment count returned from pci_map_sg
191 * because this a count of dma-mappings used to map the use_sg 338 * because this a count of dma-mappings used to map the use_sg
192 * pages. They are not guaranteed to be the same for those 339 * pages. They are not guaranteed to be the same for those
193 * architectures that implement an IOMMU. 340 * architectures that implement an IOMMU.
194 */ 341 */
195 sgel = (struct scatterlist *)scsi_cmnd->request_buffer; 342
196 lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel, 343 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
197 scsi_cmnd->use_sg, datadir); 344 scsi_sg_count(scsi_cmnd), datadir);
198 if (lpfc_cmd->seg_cnt == 0) 345 if (unlikely(!nseg))
199 return 1; 346 return 1;
200 347
348 lpfc_cmd->seg_cnt = nseg;
201 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 349 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
202 printk(KERN_ERR "%s: Too many sg segments from " 350 printk(KERN_ERR "%s: Too many sg segments from "
203 "dma_map_sg. Config %d, seg_cnt %d", 351 "dma_map_sg. Config %d, seg_cnt %d",
204 __FUNCTION__, phba->cfg_sg_seg_cnt, 352 __FUNCTION__, phba->cfg_sg_seg_cnt,
205 lpfc_cmd->seg_cnt); 353 lpfc_cmd->seg_cnt);
206 dma_unmap_sg(&phba->pcidev->dev, sgel, 354 scsi_dma_unmap(scsi_cmnd);
207 lpfc_cmd->seg_cnt, datadir);
208 return 1; 355 return 1;
209 } 356 }
210 357
@@ -214,7 +361,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
214 * single scsi command. Just run through the seg_cnt and format 361 * single scsi command. Just run through the seg_cnt and format
215 * the bde's. 362 * the bde's.
216 */ 363 */
217 for (i = 0; i < lpfc_cmd->seg_cnt; i++) { 364 scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
218 physaddr = sg_dma_address(sgel); 365 physaddr = sg_dma_address(sgel);
219 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 366 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
220 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 367 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
@@ -225,34 +372,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
225 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 372 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
226 bpl->tus.w = le32_to_cpu(bpl->tus.w); 373 bpl->tus.w = le32_to_cpu(bpl->tus.w);
227 bpl++; 374 bpl++;
228 sgel++;
229 num_bde++; 375 num_bde++;
230 } 376 }
231 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
232 physaddr = dma_map_single(&phba->pcidev->dev,
233 scsi_cmnd->request_buffer,
234 scsi_cmnd->request_bufflen,
235 datadir);
236 dma_error = dma_mapping_error(physaddr);
237 if (dma_error) {
238 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
239 "%d:0718 Unable to dma_map_single "
240 "request_buffer: x%x\n",
241 phba->brd_no, dma_error);
242 return 1;
243 }
244
245 lpfc_cmd->nonsg_phys = physaddr;
246 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
247 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
248 bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
249 if (datadir == DMA_TO_DEVICE)
250 bpl->tus.f.bdeFlags = 0;
251 else
252 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
253 bpl->tus.w = le32_to_cpu(bpl->tus.w);
254 num_bde = 1;
255 bpl++;
256 } 377 }
257 378
258 /* 379 /*
@@ -266,7 +387,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
266 (num_bde * sizeof (struct ulp_bde64)); 387 (num_bde * sizeof (struct ulp_bde64));
267 iocb_cmd->ulpBdeCount = 1; 388 iocb_cmd->ulpBdeCount = 1;
268 iocb_cmd->ulpLe = 1; 389 iocb_cmd->ulpLe = 1;
269 fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen); 390 fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
270 return 0; 391 return 0;
271} 392}
272 393
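
This hunk converts lpfc_scsi_prep_dma_buf() from the removed scsi_cmnd request_buffer/use_sg fields to the scatter-gather accessor helpers, so only one mapping path remains. A condensed sketch of that accessor pattern, with generic names for the surrounding function and the per-segment work (only the scsi_*() helpers and the dma_map_sg() call mirror the driver):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Sketch: map a command's data with the accessor API and walk the
 * resulting DMA segments.  The per-segment BDE setup is elided.
 */
static int example_map_data(struct device *dev, struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, nseg;

	if (!scsi_sg_count(cmd))
		return 0;			/* no data phase */

	nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
			  cmd->sc_data_direction);
	if (!nseg)
		return -EIO;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the HBA here */
	}

	/* any later error path must undo the mapping: scsi_dma_unmap(cmd) */
	return 0;
}
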
@@ -279,26 +400,20 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
279 * a request buffer, but did not request use_sg. There is a third 400 * a request buffer, but did not request use_sg. There is a third
280 * case, but it does not require resource deallocation. 401 * case, but it does not require resource deallocation.
281 */ 402 */
282 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { 403 if (psb->seg_cnt > 0)
283 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, 404 scsi_dma_unmap(psb->pCmd);
284 psb->seg_cnt, psb->pCmd->sc_data_direction);
285 } else {
286 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
287 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
288 psb->pCmd->request_bufflen,
289 psb->pCmd->sc_data_direction);
290 }
291 }
292} 405}
293 406
294static void 407static void
295lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) 408lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
409 struct lpfc_iocbq *rsp_iocb)
296{ 410{
297 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 411 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
298 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 412 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
299 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 413 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
300 struct lpfc_hba *phba = lpfc_cmd->scsi_hba; 414 struct lpfc_hba *phba = vport->phba;
301 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; 415 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
416 uint32_t vpi = vport->vpi;
302 uint32_t resp_info = fcprsp->rspStatus2; 417 uint32_t resp_info = fcprsp->rspStatus2;
303 uint32_t scsi_status = fcprsp->rspStatus3; 418 uint32_t scsi_status = fcprsp->rspStatus3;
304 uint32_t *lp; 419 uint32_t *lp;
@@ -331,9 +446,9 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
331 logit = LOG_FCP; 446 logit = LOG_FCP;
332 447
333 lpfc_printf_log(phba, KERN_WARNING, logit, 448 lpfc_printf_log(phba, KERN_WARNING, logit,
334 "%d:0730 FCP command x%x failed: x%x SNS x%x x%x " 449 "%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
335 "Data: x%x x%x x%x x%x x%x\n", 450 "Data: x%x x%x x%x x%x x%x\n",
336 phba->brd_no, cmnd->cmnd[0], scsi_status, 451 phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
337 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 452 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
338 be32_to_cpu(fcprsp->rspResId), 453 be32_to_cpu(fcprsp->rspResId),
339 be32_to_cpu(fcprsp->rspSnsLen), 454 be32_to_cpu(fcprsp->rspSnsLen),
@@ -349,15 +464,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
349 } 464 }
350 } 465 }
351 466
352 cmnd->resid = 0; 467 scsi_set_resid(cmnd, 0);
353 if (resp_info & RESID_UNDER) { 468 if (resp_info & RESID_UNDER) {
354 cmnd->resid = be32_to_cpu(fcprsp->rspResId); 469 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
355 470
356 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 471 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
357 "%d:0716 FCP Read Underrun, expected %d, " 472 "%d (%d):0716 FCP Read Underrun, expected %d, "
358 "residual %d Data: x%x x%x x%x\n", phba->brd_no, 473 "residual %d Data: x%x x%x x%x\n",
359 be32_to_cpu(fcpcmd->fcpDl), cmnd->resid, 474 phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
360 fcpi_parm, cmnd->cmnd[0], cmnd->underflow); 475 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
476 cmnd->underflow);
361 477
362 /* 478 /*
363 * If there is an under run check if under run reported by 479 * If there is an under run check if under run reported by
@@ -366,15 +482,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
366 */ 482 */
367 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 483 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
368 fcpi_parm && 484 fcpi_parm &&
369 (cmnd->resid != fcpi_parm)) { 485 (scsi_get_resid(cmnd) != fcpi_parm)) {
370 lpfc_printf_log(phba, KERN_WARNING, 486 lpfc_printf_log(phba, KERN_WARNING,
371 LOG_FCP | LOG_FCP_ERROR, 487 LOG_FCP | LOG_FCP_ERROR,
372 "%d:0735 FCP Read Check Error and Underrun " 488 "%d (%d):0735 FCP Read Check Error "
373 "Data: x%x x%x x%x x%x\n", phba->brd_no, 489 "and Underrun Data: x%x x%x x%x x%x\n",
374 be32_to_cpu(fcpcmd->fcpDl), 490 phba->brd_no, vpi,
375 cmnd->resid, 491 be32_to_cpu(fcpcmd->fcpDl),
376 fcpi_parm, cmnd->cmnd[0]); 492 scsi_get_resid(cmnd), fcpi_parm,
377 cmnd->resid = cmnd->request_bufflen; 493 cmnd->cmnd[0]);
494 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
378 host_status = DID_ERROR; 495 host_status = DID_ERROR;
379 } 496 }
380 /* 497 /*
@@ -385,22 +502,23 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
385 */ 502 */
386 if (!(resp_info & SNS_LEN_VALID) && 503 if (!(resp_info & SNS_LEN_VALID) &&
387 (scsi_status == SAM_STAT_GOOD) && 504 (scsi_status == SAM_STAT_GOOD) &&
388 (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) { 505 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
506 < cmnd->underflow)) {
389 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 507 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
390 "%d:0717 FCP command x%x residual " 508 "%d (%d):0717 FCP command x%x residual "
391 "underrun converted to error " 509 "underrun converted to error "
392 "Data: x%x x%x x%x\n", phba->brd_no, 510 "Data: x%x x%x x%x\n",
393 cmnd->cmnd[0], cmnd->request_bufflen, 511 phba->brd_no, vpi, cmnd->cmnd[0],
394 cmnd->resid, cmnd->underflow); 512 scsi_bufflen(cmnd),
395 513 scsi_get_resid(cmnd), cmnd->underflow);
396 host_status = DID_ERROR; 514 host_status = DID_ERROR;
397 } 515 }
398 } else if (resp_info & RESID_OVER) { 516 } else if (resp_info & RESID_OVER) {
399 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 517 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
400 "%d:0720 FCP command x%x residual " 518 "%d (%d):0720 FCP command x%x residual "
401 "overrun error. Data: x%x x%x \n", 519 "overrun error. Data: x%x x%x \n",
402 phba->brd_no, cmnd->cmnd[0], 520 phba->brd_no, vpi, cmnd->cmnd[0],
403 cmnd->request_bufflen, cmnd->resid); 521 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
404 host_status = DID_ERROR; 522 host_status = DID_ERROR;
405 523
406 /* 524 /*
@@ -410,13 +528,14 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
410 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && 528 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
411 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { 529 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
412 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 530 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
413 "%d:0734 FCP Read Check Error Data: " 531 "%d (%d):0734 FCP Read Check Error Data: "
414 "x%x x%x x%x x%x\n", phba->brd_no, 532 "x%x x%x x%x x%x\n",
415 be32_to_cpu(fcpcmd->fcpDl), 533 phba->brd_no, vpi,
416 be32_to_cpu(fcprsp->rspResId), 534 be32_to_cpu(fcpcmd->fcpDl),
417 fcpi_parm, cmnd->cmnd[0]); 535 be32_to_cpu(fcprsp->rspResId),
536 fcpi_parm, cmnd->cmnd[0]);
418 host_status = DID_ERROR; 537 host_status = DID_ERROR;
419 cmnd->resid = cmnd->request_bufflen; 538 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
420 } 539 }
421 540
422 out: 541 out:
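
From here on, every message in this patch switches its prefix from "%d:NNNN" to "%d (%d):NNNN" so the log carries the virtual port index (vpi) next to the adapter number. A one-line sketch of the convention; the message number 9999 and the trailing value are placeholders, not real driver messages:

	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d (%d):9999 example event Data: x%x\n",
			phba->brd_no, vport->vpi, example_value);
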
@@ -429,9 +548,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
429{ 548{
430 struct lpfc_scsi_buf *lpfc_cmd = 549 struct lpfc_scsi_buf *lpfc_cmd =
431 (struct lpfc_scsi_buf *) pIocbIn->context1; 550 (struct lpfc_scsi_buf *) pIocbIn->context1;
551 struct lpfc_vport *vport = pIocbIn->vport;
432 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 552 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
433 struct lpfc_nodelist *pnode = rdata->pnode; 553 struct lpfc_nodelist *pnode = rdata->pnode;
434 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 554 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
555 uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
556 ? lpfc_cmd->cur_iocbq.vport->vpi
557 : 0);
435 int result; 558 int result;
436 struct scsi_device *sdev, *tmp_sdev; 559 struct scsi_device *sdev, *tmp_sdev;
437 int depth = 0; 560 int depth = 0;
@@ -447,22 +570,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
447 lpfc_cmd->status = IOSTAT_DEFAULT; 570 lpfc_cmd->status = IOSTAT_DEFAULT;
448 571
449 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 572 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
450 "%d:0729 FCP cmd x%x failed <%d/%d> status: " 573 "%d (%d):0729 FCP cmd x%x failed <%d/%d> "
451 "x%x result: x%x Data: x%x x%x\n", 574 "status: x%x result: x%x Data: x%x x%x\n",
452 phba->brd_no, cmd->cmnd[0], cmd->device->id, 575 phba->brd_no, vpi, cmd->cmnd[0],
453 cmd->device->lun, lpfc_cmd->status, 576 cmd->device ? cmd->device->id : 0xffff,
454 lpfc_cmd->result, pIocbOut->iocb.ulpContext, 577 cmd->device ? cmd->device->lun : 0xffff,
578 lpfc_cmd->status, lpfc_cmd->result,
579 pIocbOut->iocb.ulpContext,
455 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 580 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
456 581
457 switch (lpfc_cmd->status) { 582 switch (lpfc_cmd->status) {
458 case IOSTAT_FCP_RSP_ERROR: 583 case IOSTAT_FCP_RSP_ERROR:
459 /* Call FCP RSP handler to determine result */ 584 /* Call FCP RSP handler to determine result */
460 lpfc_handle_fcp_err(lpfc_cmd,pIocbOut); 585 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
461 break; 586 break;
462 case IOSTAT_NPORT_BSY: 587 case IOSTAT_NPORT_BSY:
463 case IOSTAT_FABRIC_BSY: 588 case IOSTAT_FABRIC_BSY:
464 cmd->result = ScsiResult(DID_BUS_BUSY, 0); 589 cmd->result = ScsiResult(DID_BUS_BUSY, 0);
465 break; 590 break;
591 case IOSTAT_LOCAL_REJECT:
592 if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
593 lpfc_cmd->result == IOERR_NO_RESOURCES ||
594 lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
595 cmd->result = ScsiResult(DID_REQUEUE, 0);
596 break;
597 } /* else: fall through */
466 default: 598 default:
467 cmd->result = ScsiResult(DID_ERROR, 0); 599 cmd->result = ScsiResult(DID_ERROR, 0);
468 break; 600 break;
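
The new IOSTAT_LOCAL_REJECT arm singles out three transient reject reasons and returns DID_REQUEUE so the SCSI midlayer retries the command instead of failing it; every other local reject still falls through to DID_ERROR. A minimal sketch of that classification (the helper name is made up; the constants come from the driver's headers):

#include <scsi/scsi.h>
#include "lpfc_hw.h"	/* RJT_*, IOERR_* (driver-internal definitions) */

/* Illustrative only: map a local-reject reason code to a host byte. */
static int example_local_reject_to_host_byte(uint32_t reason)
{
	switch (reason) {
	case RJT_UNAVAIL_PERM:
	case IOERR_NO_RESOURCES:
	case RJT_LOGIN_REQUIRED:
		return DID_REQUEUE;	/* transient: let the midlayer retry */
	default:
		return DID_ERROR;	/* anything else is a hard failure */
	}
}
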
@@ -479,11 +611,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
479 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 611 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
480 612
481 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 613 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
482 "%d:0710 Iodone <%d/%d> cmd %p, error x%x " 614 "%d (%d):0710 Iodone <%d/%d> cmd %p, error "
483 "SNS x%x x%x Data: x%x x%x\n", 615 "x%x SNS x%x x%x Data: x%x x%x\n",
484 phba->brd_no, cmd->device->id, 616 phba->brd_no, vpi, cmd->device->id,
485 cmd->device->lun, cmd, cmd->result, 617 cmd->device->lun, cmd, cmd->result,
486 *lp, *(lp + 3), cmd->retries, cmd->resid); 618 *lp, *(lp + 3), cmd->retries,
619 scsi_get_resid(cmd));
487 } 620 }
488 621
489 result = cmd->result; 622 result = cmd->result;
@@ -496,6 +629,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
496 return; 629 return;
497 } 630 }
498 631
632
633 if (!result)
634 lpfc_rampup_queue_depth(phba, sdev);
635
499 if (!result && pnode != NULL && 636 if (!result && pnode != NULL &&
500 ((jiffies - pnode->last_ramp_up_time) > 637 ((jiffies - pnode->last_ramp_up_time) >
501 LPFC_Q_RAMP_UP_INTERVAL * HZ) && 638 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
@@ -534,7 +671,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
534 tmp_sdev->queue_depth - 1); 671 tmp_sdev->queue_depth - 1);
535 } 672 }
536 /* 673 /*
537 * The queue depth cannot be lowered any more. 674 * The queue depth cannot be lowered any more.
538 * Modify the returned error code to store 675 * Modify the returned error code to store
539 * the final depth value set by 676 * the final depth value set by
540 * scsi_track_queue_full. 677 * scsi_track_queue_full.
@@ -544,8 +681,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
544 681
545 if (depth) { 682 if (depth) {
546 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 683 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
547 "%d:0711 detected queue full - lun queue depth " 684 "%d (%d):0711 detected queue full - "
548 " adjusted to %d.\n", phba->brd_no, depth); 685 "lun queue depth adjusted to %d.\n",
686 phba->brd_no, vpi, depth);
549 } 687 }
550 } 688 }
551 689
@@ -553,9 +691,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
553} 691}
554 692
555static void 693static void
556lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, 694lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
557 struct lpfc_nodelist *pnode) 695 struct lpfc_nodelist *pnode)
558{ 696{
697 struct lpfc_hba *phba = vport->phba;
559 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 698 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
560 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 699 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
561 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 700 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
@@ -592,7 +731,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
592 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 731 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
593 * data bde entry. 732 * data bde entry.
594 */ 733 */
595 if (scsi_cmnd->use_sg) { 734 if (scsi_sg_count(scsi_cmnd)) {
596 if (datadir == DMA_TO_DEVICE) { 735 if (datadir == DMA_TO_DEVICE) {
597 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 736 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
598 iocb_cmd->un.fcpi.fcpi_parm = 0; 737 iocb_cmd->un.fcpi.fcpi_parm = 0;
@@ -602,23 +741,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
602 } else { 741 } else {
603 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 742 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
604 iocb_cmd->ulpPU = PARM_READ_CHECK; 743 iocb_cmd->ulpPU = PARM_READ_CHECK;
605 iocb_cmd->un.fcpi.fcpi_parm = 744 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
606 scsi_cmnd->request_bufflen;
607 fcp_cmnd->fcpCntl3 = READ_DATA;
608 phba->fc4InputRequests++;
609 }
610 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
611 if (datadir == DMA_TO_DEVICE) {
612 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
613 iocb_cmd->un.fcpi.fcpi_parm = 0;
614 iocb_cmd->ulpPU = 0;
615 fcp_cmnd->fcpCntl3 = WRITE_DATA;
616 phba->fc4OutputRequests++;
617 } else {
618 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
619 iocb_cmd->ulpPU = PARM_READ_CHECK;
620 iocb_cmd->un.fcpi.fcpi_parm =
621 scsi_cmnd->request_bufflen;
622 fcp_cmnd->fcpCntl3 = READ_DATA; 745 fcp_cmnd->fcpCntl3 = READ_DATA;
623 phba->fc4InputRequests++; 746 phba->fc4InputRequests++;
624 } 747 }
@@ -642,15 +765,15 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
642 piocbq->context1 = lpfc_cmd; 765 piocbq->context1 = lpfc_cmd;
643 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 766 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
644 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout; 767 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
768 piocbq->vport = vport;
645} 769}
646 770
647static int 771static int
648lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, 772lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
649 struct lpfc_scsi_buf *lpfc_cmd, 773 struct lpfc_scsi_buf *lpfc_cmd,
650 unsigned int lun, 774 unsigned int lun,
651 uint8_t task_mgmt_cmd) 775 uint8_t task_mgmt_cmd)
652{ 776{
653 struct lpfc_sli *psli;
654 struct lpfc_iocbq *piocbq; 777 struct lpfc_iocbq *piocbq;
655 IOCB_t *piocb; 778 IOCB_t *piocb;
656 struct fcp_cmnd *fcp_cmnd; 779 struct fcp_cmnd *fcp_cmnd;
@@ -661,8 +784,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
661 return 0; 784 return 0;
662 } 785 }
663 786
664 psli = &phba->sli;
665 piocbq = &(lpfc_cmd->cur_iocbq); 787 piocbq = &(lpfc_cmd->cur_iocbq);
788 piocbq->vport = vport;
789
666 piocb = &piocbq->iocb; 790 piocb = &piocbq->iocb;
667 791
668 fcp_cmnd = lpfc_cmd->fcp_cmnd; 792 fcp_cmnd = lpfc_cmd->fcp_cmnd;
@@ -688,7 +812,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
688 piocb->ulpTimeout = lpfc_cmd->timeout; 812 piocb->ulpTimeout = lpfc_cmd->timeout;
689 } 813 }
690 814
691 return (1); 815 return 1;
692} 816}
693 817
694static void 818static void
@@ -704,10 +828,11 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
704} 828}
705 829
706static int 830static int
707lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, 831lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
708 unsigned tgt_id, unsigned int lun, 832 unsigned tgt_id, unsigned int lun,
709 struct lpfc_rport_data *rdata) 833 struct lpfc_rport_data *rdata)
710{ 834{
835 struct lpfc_hba *phba = vport->phba;
711 struct lpfc_iocbq *iocbq; 836 struct lpfc_iocbq *iocbq;
712 struct lpfc_iocbq *iocbqrsp; 837 struct lpfc_iocbq *iocbqrsp;
713 int ret; 838 int ret;
@@ -716,12 +841,11 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
716 return FAILED; 841 return FAILED;
717 842
718 lpfc_cmd->rdata = rdata; 843 lpfc_cmd->rdata = rdata;
719 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, 844 ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
720 FCP_TARGET_RESET); 845 FCP_TARGET_RESET);
721 if (!ret) 846 if (!ret)
722 return FAILED; 847 return FAILED;
723 848
724 lpfc_cmd->scsi_hba = phba;
725 iocbq = &lpfc_cmd->cur_iocbq; 849 iocbq = &lpfc_cmd->cur_iocbq;
726 iocbqrsp = lpfc_sli_get_iocbq(phba); 850 iocbqrsp = lpfc_sli_get_iocbq(phba);
727 851
@@ -730,10 +854,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
730 854
731 /* Issue Target Reset to TGT <num> */ 855 /* Issue Target Reset to TGT <num> */
732 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 856 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
733 "%d:0702 Issue Target Reset to TGT %d " 857 "%d (%d):0702 Issue Target Reset to TGT %d "
734 "Data: x%x x%x\n", 858 "Data: x%x x%x\n",
735 phba->brd_no, tgt_id, rdata->pnode->nlp_rpi, 859 phba->brd_no, vport->vpi, tgt_id,
736 rdata->pnode->nlp_flag); 860 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
737 861
738 ret = lpfc_sli_issue_iocb_wait(phba, 862 ret = lpfc_sli_issue_iocb_wait(phba,
739 &phba->sli.ring[phba->sli.fcp_ring], 863 &phba->sli.ring[phba->sli.fcp_ring],
@@ -758,7 +882,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
758const char * 882const char *
759lpfc_info(struct Scsi_Host *host) 883lpfc_info(struct Scsi_Host *host)
760{ 884{
761 struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata; 885 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
886 struct lpfc_hba *phba = vport->phba;
762 int len; 887 int len;
763 static char lpfcinfobuf[384]; 888 static char lpfcinfobuf[384];
764 889
@@ -800,26 +925,22 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
800 925
801void lpfc_poll_timeout(unsigned long ptr) 926void lpfc_poll_timeout(unsigned long ptr)
802{ 927{
803 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 928 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
804 unsigned long iflag;
805
806 spin_lock_irqsave(phba->host->host_lock, iflag);
807 929
808 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 930 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
809 lpfc_sli_poll_fcp_ring (phba); 931 lpfc_sli_poll_fcp_ring (phba);
810 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 932 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
811 lpfc_poll_rearm_timer(phba); 933 lpfc_poll_rearm_timer(phba);
812 } 934 }
813
814 spin_unlock_irqrestore(phba->host->host_lock, iflag);
815} 935}
816 936
817static int 937static int
818lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) 938lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
819{ 939{
820 struct lpfc_hba *phba = 940 struct Scsi_Host *shost = cmnd->device->host;
821 (struct lpfc_hba *) cmnd->device->host->hostdata; 941 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
822 struct lpfc_sli *psli = &phba->sli; 942 struct lpfc_hba *phba = vport->phba;
943 struct lpfc_sli *psli = &phba->sli;
823 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 944 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
824 struct lpfc_nodelist *ndlp = rdata->pnode; 945 struct lpfc_nodelist *ndlp = rdata->pnode;
825 struct lpfc_scsi_buf *lpfc_cmd; 946 struct lpfc_scsi_buf *lpfc_cmd;
@@ -840,11 +961,14 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
840 cmnd->result = ScsiResult(DID_BUS_BUSY, 0); 961 cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
841 goto out_fail_command; 962 goto out_fail_command;
842 } 963 }
843 lpfc_cmd = lpfc_get_scsi_buf (phba); 964 lpfc_cmd = lpfc_get_scsi_buf(phba);
844 if (lpfc_cmd == NULL) { 965 if (lpfc_cmd == NULL) {
966 lpfc_adjust_queue_depth(phba);
967
845 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 968 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
846 "%d:0707 driver's buffer pool is empty, " 969 "%d (%d):0707 driver's buffer pool is empty, "
847 "IO busied\n", phba->brd_no); 970 "IO busied\n",
971 phba->brd_no, vport->vpi);
848 goto out_host_busy; 972 goto out_host_busy;
849 } 973 }
850 974
@@ -862,10 +986,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
862 if (err) 986 if (err)
863 goto out_host_busy_free_buf; 987 goto out_host_busy_free_buf;
864 988
865 lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp); 989 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
866 990
867 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 991 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
868 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 992 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
869 if (err) 993 if (err)
870 goto out_host_busy_free_buf; 994 goto out_host_busy_free_buf;
871 995
@@ -907,8 +1031,9 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
907static int 1031static int
908lpfc_abort_handler(struct scsi_cmnd *cmnd) 1032lpfc_abort_handler(struct scsi_cmnd *cmnd)
909{ 1033{
910 struct Scsi_Host *shost = cmnd->device->host; 1034 struct Scsi_Host *shost = cmnd->device->host;
911 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1035 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1036 struct lpfc_hba *phba = vport->phba;
912 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; 1037 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
913 struct lpfc_iocbq *iocb; 1038 struct lpfc_iocbq *iocb;
914 struct lpfc_iocbq *abtsiocb; 1039 struct lpfc_iocbq *abtsiocb;
@@ -918,8 +1043,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
918 int ret = SUCCESS; 1043 int ret = SUCCESS;
919 1044
920 lpfc_block_error_handler(cmnd); 1045 lpfc_block_error_handler(cmnd);
921 spin_lock_irq(shost->host_lock);
922
923 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 1046 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
924 BUG_ON(!lpfc_cmd); 1047 BUG_ON(!lpfc_cmd);
925 1048
@@ -956,12 +1079,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
956 1079
957 icmd->ulpLe = 1; 1080 icmd->ulpLe = 1;
958 icmd->ulpClass = cmd->ulpClass; 1081 icmd->ulpClass = cmd->ulpClass;
959 if (phba->hba_state >= LPFC_LINK_UP) 1082 if (lpfc_is_link_up(phba))
960 icmd->ulpCommand = CMD_ABORT_XRI_CN; 1083 icmd->ulpCommand = CMD_ABORT_XRI_CN;
961 else 1084 else
962 icmd->ulpCommand = CMD_CLOSE_XRI_CN; 1085 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
963 1086
964 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 1087 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
1088 abtsiocb->vport = vport;
965 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { 1089 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
966 lpfc_sli_release_iocbq(phba, abtsiocb); 1090 lpfc_sli_release_iocbq(phba, abtsiocb);
967 ret = FAILED; 1091 ret = FAILED;
@@ -977,9 +1101,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
977 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 1101 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
978 lpfc_sli_poll_fcp_ring (phba); 1102 lpfc_sli_poll_fcp_ring (phba);
979 1103
980 spin_unlock_irq(phba->host->host_lock); 1104 schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
981 schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
982 spin_lock_irq(phba->host->host_lock);
983 if (++loop_count 1105 if (++loop_count
984 > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT) 1106 > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
985 break; 1107 break;
@@ -988,30 +1110,30 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
988 if (lpfc_cmd->pCmd == cmnd) { 1110 if (lpfc_cmd->pCmd == cmnd) {
989 ret = FAILED; 1111 ret = FAILED;
990 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1112 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
991 "%d:0748 abort handler timed out waiting for " 1113 "%d (%d):0748 abort handler timed out waiting "
992 "abort to complete: ret %#x, ID %d, LUN %d, " 1114 "for abort to complete: ret %#x, ID %d, "
993 "snum %#lx\n", 1115 "LUN %d, snum %#lx\n",
994 phba->brd_no, ret, cmnd->device->id, 1116 phba->brd_no, vport->vpi, ret,
995 cmnd->device->lun, cmnd->serial_number); 1117 cmnd->device->id, cmnd->device->lun,
1118 cmnd->serial_number);
996 } 1119 }
997 1120
998 out: 1121 out:
999 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1122 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1000 "%d:0749 SCSI Layer I/O Abort Request " 1123 "%d (%d):0749 SCSI Layer I/O Abort Request "
1001 "Status x%x ID %d LUN %d snum %#lx\n", 1124 "Status x%x ID %d LUN %d snum %#lx\n",
1002 phba->brd_no, ret, cmnd->device->id, 1125 phba->brd_no, vport->vpi, ret, cmnd->device->id,
1003 cmnd->device->lun, cmnd->serial_number); 1126 cmnd->device->lun, cmnd->serial_number);
1004 1127
1005 spin_unlock_irq(shost->host_lock);
1006
1007 return ret; 1128 return ret;
1008} 1129}
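
lpfc_abort_handler() now runs without shost->host_lock, which removes the old unlock/sleep/relock sequence around schedule_timeout_uninterruptible(); sleeping while holding a spinlock is never allowed. A sketch of the resulting wait, reconstructed from the fragments above (the loop condition on lpfc_cmd->pCmd is an assumption about the elided context; the sleep length and bound are taken from the code):

	/* Poll for the aborted command to finish; no locks held here. */
	loop_count = 0;
	while (lpfc_cmd->pCmd == cmnd) {
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring(phba);
		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
		if (++loop_count >
		    (2 * phba->cfg_devloss_tmo) / LPFC_ABORT_WAIT)
			break;
	}
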
1009 1130
1010static int 1131static int
1011lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 1132lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1012{ 1133{
1013 struct Scsi_Host *shost = cmnd->device->host; 1134 struct Scsi_Host *shost = cmnd->device->host;
1014 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1135 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1136 struct lpfc_hba *phba = vport->phba;
1015 struct lpfc_scsi_buf *lpfc_cmd; 1137 struct lpfc_scsi_buf *lpfc_cmd;
1016 struct lpfc_iocbq *iocbq, *iocbqrsp; 1138 struct lpfc_iocbq *iocbq, *iocbqrsp;
1017 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 1139 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
@@ -1022,28 +1144,26 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1022 int cnt, loopcnt; 1144 int cnt, loopcnt;
1023 1145
1024 lpfc_block_error_handler(cmnd); 1146 lpfc_block_error_handler(cmnd);
1025 spin_lock_irq(shost->host_lock);
1026 loopcnt = 0; 1147 loopcnt = 0;
1027 /* 1148 /*
1028 * If target is not in a MAPPED state, delay the reset until 1149 * If target is not in a MAPPED state, delay the reset until
1029 * target is rediscovered or devloss timeout expires. 1150 * target is rediscovered or devloss timeout expires.
1030 */ 1151 */
1031 while ( 1 ) { 1152 while (1) {
1032 if (!pnode) 1153 if (!pnode)
1033 goto out; 1154 goto out;
1034 1155
1035 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { 1156 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1036 spin_unlock_irq(phba->host->host_lock);
1037 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1157 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1038 spin_lock_irq(phba->host->host_lock);
1039 loopcnt++; 1158 loopcnt++;
1040 rdata = cmnd->device->hostdata; 1159 rdata = cmnd->device->hostdata;
1041 if (!rdata || 1160 if (!rdata ||
1042 (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) { 1161 (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
1043 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1162 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1044 "%d:0721 LUN Reset rport failure:" 1163 "%d (%d):0721 LUN Reset rport "
1045 " cnt x%x rdata x%p\n", 1164 "failure: cnt x%x rdata x%p\n",
1046 phba->brd_no, loopcnt, rdata); 1165 phba->brd_no, vport->vpi,
1166 loopcnt, rdata);
1047 goto out; 1167 goto out;
1048 } 1168 }
1049 pnode = rdata->pnode; 1169 pnode = rdata->pnode;
@@ -1054,15 +1174,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1054 break; 1174 break;
1055 } 1175 }
1056 1176
1057 lpfc_cmd = lpfc_get_scsi_buf (phba); 1177 lpfc_cmd = lpfc_get_scsi_buf(phba);
1058 if (lpfc_cmd == NULL) 1178 if (lpfc_cmd == NULL)
1059 goto out; 1179 goto out;
1060 1180
1061 lpfc_cmd->timeout = 60; 1181 lpfc_cmd->timeout = 60;
1062 lpfc_cmd->scsi_hba = phba;
1063 lpfc_cmd->rdata = rdata; 1182 lpfc_cmd->rdata = rdata;
1064 1183
1065 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun, 1184 ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
1066 FCP_TARGET_RESET); 1185 FCP_TARGET_RESET);
1067 if (!ret) 1186 if (!ret)
1068 goto out_free_scsi_buf; 1187 goto out_free_scsi_buf;
@@ -1075,8 +1194,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1075 goto out_free_scsi_buf; 1194 goto out_free_scsi_buf;
1076 1195
1077 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 1196 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
1078 "%d:0703 Issue target reset to TGT %d LUN %d rpi x%x " 1197 "%d (%d):0703 Issue target reset to TGT %d LUN %d "
1079 "nlp_flag x%x\n", phba->brd_no, cmnd->device->id, 1198 "rpi x%x nlp_flag x%x\n",
1199 phba->brd_no, vport->vpi, cmnd->device->id,
1080 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 1200 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1081 1201
1082 iocb_status = lpfc_sli_issue_iocb_wait(phba, 1202 iocb_status = lpfc_sli_issue_iocb_wait(phba,
@@ -1111,9 +1231,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1111 0, LPFC_CTX_LUN); 1231 0, LPFC_CTX_LUN);
1112 loopcnt = 0; 1232 loopcnt = 0;
1113 while(cnt) { 1233 while(cnt) {
1114 spin_unlock_irq(phba->host->host_lock);
1115 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); 1234 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
1116 spin_lock_irq(phba->host->host_lock);
1117 1235
1118 if (++loopcnt 1236 if (++loopcnt
1119 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT) 1237 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
@@ -1127,8 +1245,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1127 1245
1128 if (cnt) { 1246 if (cnt) {
1129 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1247 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1130 "%d:0719 device reset I/O flush failure: cnt x%x\n", 1248 "%d (%d):0719 device reset I/O flush failure: "
1131 phba->brd_no, cnt); 1249 "cnt x%x\n",
1250 phba->brd_no, vport->vpi, cnt);
1132 ret = FAILED; 1251 ret = FAILED;
1133 } 1252 }
1134 1253
@@ -1137,21 +1256,21 @@ out_free_scsi_buf:
1137 lpfc_release_scsi_buf(phba, lpfc_cmd); 1256 lpfc_release_scsi_buf(phba, lpfc_cmd);
1138 } 1257 }
1139 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1258 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1140 "%d:0713 SCSI layer issued device reset (%d, %d) " 1259 "%d (%d):0713 SCSI layer issued device reset (%d, %d) "
1141 "return x%x status x%x result x%x\n", 1260 "return x%x status x%x result x%x\n",
1142 phba->brd_no, cmnd->device->id, cmnd->device->lun, 1261 phba->brd_no, vport->vpi, cmnd->device->id,
1143 ret, cmd_status, cmd_result); 1262 cmnd->device->lun, ret, cmd_status, cmd_result);
1144 1263
1145out: 1264out:
1146 spin_unlock_irq(shost->host_lock);
1147 return ret; 1265 return ret;
1148} 1266}
1149 1267
1150static int 1268static int
1151lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 1269lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1152{ 1270{
1153 struct Scsi_Host *shost = cmnd->device->host; 1271 struct Scsi_Host *shost = cmnd->device->host;
1154 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1272 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1273 struct lpfc_hba *phba = vport->phba;
1155 struct lpfc_nodelist *ndlp = NULL; 1274 struct lpfc_nodelist *ndlp = NULL;
1156 int match; 1275 int match;
1157 int ret = FAILED, i, err_count = 0; 1276 int ret = FAILED, i, err_count = 0;
@@ -1159,7 +1278,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1159 struct lpfc_scsi_buf * lpfc_cmd; 1278 struct lpfc_scsi_buf * lpfc_cmd;
1160 1279
1161 lpfc_block_error_handler(cmnd); 1280 lpfc_block_error_handler(cmnd);
1162 spin_lock_irq(shost->host_lock);
1163 1281
1164 lpfc_cmd = lpfc_get_scsi_buf(phba); 1282 lpfc_cmd = lpfc_get_scsi_buf(phba);
1165 if (lpfc_cmd == NULL) 1283 if (lpfc_cmd == NULL)
@@ -1167,7 +1285,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1167 1285
1168 /* The lpfc_cmd storage is reused. Set all loop invariants. */ 1286 /* The lpfc_cmd storage is reused. Set all loop invariants. */
1169 lpfc_cmd->timeout = 60; 1287 lpfc_cmd->timeout = 60;
1170 lpfc_cmd->scsi_hba = phba;
1171 1288
1172 /* 1289 /*
1173 * Since the driver manages a single bus device, reset all 1290 * Since the driver manages a single bus device, reset all
@@ -1177,7 +1294,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1177 for (i = 0; i < LPFC_MAX_TARGET; i++) { 1294 for (i = 0; i < LPFC_MAX_TARGET; i++) {
1178 /* Search for mapped node by target ID */ 1295 /* Search for mapped node by target ID */
1179 match = 0; 1296 match = 0;
1180 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 1297 spin_lock_irq(shost->host_lock);
1298 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1181 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 1299 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1182 i == ndlp->nlp_sid && 1300 i == ndlp->nlp_sid &&
1183 ndlp->rport) { 1301 ndlp->rport) {
@@ -1185,15 +1303,18 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1185 break; 1303 break;
1186 } 1304 }
1187 } 1305 }
1306 spin_unlock_irq(shost->host_lock);
1188 if (!match) 1307 if (!match)
1189 continue; 1308 continue;
1190 1309
1191 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun, 1310 ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
1311 cmnd->device->lun,
1192 ndlp->rport->dd_data); 1312 ndlp->rport->dd_data);
1193 if (ret != SUCCESS) { 1313 if (ret != SUCCESS) {
1194 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1314 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1195 "%d:0700 Bus Reset on target %d failed\n", 1315 "%d (%d):0700 Bus Reset on target %d "
1196 phba->brd_no, i); 1316 "failed\n",
1317 phba->brd_no, vport->vpi, i);
1197 err_count++; 1318 err_count++;
1198 break; 1319 break;
1199 } 1320 }
@@ -1219,9 +1340,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1219 0, 0, 0, LPFC_CTX_HOST); 1340 0, 0, 0, LPFC_CTX_HOST);
1220 loopcnt = 0; 1341 loopcnt = 0;
1221 while(cnt) { 1342 while(cnt) {
1222 spin_unlock_irq(phba->host->host_lock);
1223 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); 1343 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
1224 spin_lock_irq(phba->host->host_lock);
1225 1344
1226 if (++loopcnt 1345 if (++loopcnt
1227 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT) 1346 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
@@ -1234,25 +1353,24 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1234 1353
1235 if (cnt) { 1354 if (cnt) {
1236 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1355 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1237 "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n", 1356 "%d (%d):0715 Bus Reset I/O flush failure: "
1238 phba->brd_no, cnt, i); 1357 "cnt x%x left x%x\n",
1358 phba->brd_no, vport->vpi, cnt, i);
1239 ret = FAILED; 1359 ret = FAILED;
1240 } 1360 }
1241 1361
1242 lpfc_printf_log(phba, 1362 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1243 KERN_ERR, 1363 "%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
1244 LOG_FCP, 1364 phba->brd_no, vport->vpi, ret);
1245 "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
1246 phba->brd_no, ret);
1247out: 1365out:
1248 spin_unlock_irq(shost->host_lock);
1249 return ret; 1366 return ret;
1250} 1367}
1251 1368
1252static int 1369static int
1253lpfc_slave_alloc(struct scsi_device *sdev) 1370lpfc_slave_alloc(struct scsi_device *sdev)
1254{ 1371{
1255 struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata; 1372 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1373 struct lpfc_hba *phba = vport->phba;
1256 struct lpfc_scsi_buf *scsi_buf = NULL; 1374 struct lpfc_scsi_buf *scsi_buf = NULL;
1257 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1375 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1258 uint32_t total = 0, i; 1376 uint32_t total = 0, i;
@@ -1273,27 +1391,35 @@ lpfc_slave_alloc(struct scsi_device *sdev)
1273 */ 1391 */
1274 total = phba->total_scsi_bufs; 1392 total = phba->total_scsi_bufs;
1275 num_to_alloc = phba->cfg_lun_queue_depth + 2; 1393 num_to_alloc = phba->cfg_lun_queue_depth + 2;
1276 if (total >= phba->cfg_hba_queue_depth) { 1394
1395 /* Allow some exchanges to be available always to complete discovery */
1396 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
1277 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1397 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1278 "%d:0704 At limitation of %d preallocated " 1398 "%d (%d):0704 At limitation of %d "
1279 "command buffers\n", phba->brd_no, total); 1399 "preallocated command buffers\n",
1400 phba->brd_no, vport->vpi, total);
1280 return 0; 1401 return 0;
1281 } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) { 1402
1403 /* Allow some exchanges to be available always to complete discovery */
1404 } else if (total + num_to_alloc >
1405 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
1282 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1406 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1283 "%d:0705 Allocation request of %d command " 1407 "%d (%d):0705 Allocation request of %d "
1284 "buffers will exceed max of %d. Reducing " 1408 "command buffers will exceed max of %d. "
1285 "allocation request to %d.\n", phba->brd_no, 1409 "Reducing allocation request to %d.\n",
1286 num_to_alloc, phba->cfg_hba_queue_depth, 1410 phba->brd_no, vport->vpi, num_to_alloc,
1411 phba->cfg_hba_queue_depth,
1287 (phba->cfg_hba_queue_depth - total)); 1412 (phba->cfg_hba_queue_depth - total));
1288 num_to_alloc = phba->cfg_hba_queue_depth - total; 1413 num_to_alloc = phba->cfg_hba_queue_depth - total;
1289 } 1414 }
1290 1415
1291 for (i = 0; i < num_to_alloc; i++) { 1416 for (i = 0; i < num_to_alloc; i++) {
1292 scsi_buf = lpfc_new_scsi_buf(phba); 1417 scsi_buf = lpfc_new_scsi_buf(vport);
1293 if (!scsi_buf) { 1418 if (!scsi_buf) {
1294 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1419 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1295 "%d:0706 Failed to allocate command " 1420 "%d (%d):0706 Failed to allocate "
1296 "buffer\n", phba->brd_no); 1421 "command buffer\n",
1422 phba->brd_no, vport->vpi);
1297 break; 1423 break;
1298 } 1424 }
1299 1425
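
lpfc_slave_alloc() now stops growing the scsi_buf pool LPFC_DISC_IOCB_BUFF_COUNT exchanges short of cfg_hba_queue_depth, so discovery traffic always has command resources left. A small sketch of the headroom check with illustrative numbers (1024 and 20 are assumptions, not values taken from this patch):

/* Headroom check, numbers illustrative only. */
enum {
	EXAMPLE_HBA_QUEUE_DEPTH = 1024,	/* stands in for phba->cfg_hba_queue_depth */
	EXAMPLE_DISC_RESERVE    = 20,	/* stands in for LPFC_DISC_IOCB_BUFF_COUNT */
};

static int example_bufs_to_alloc(int total, int num_to_alloc)
{
	int limit = EXAMPLE_HBA_QUEUE_DEPTH - EXAMPLE_DISC_RESERVE;

	if (total >= limit)
		return 0;				/* pool already at its cap */
	if (total + num_to_alloc > limit)
		return EXAMPLE_HBA_QUEUE_DEPTH - total;	/* trim, as the driver does */
	return num_to_alloc;
}
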
@@ -1308,8 +1434,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
1308static int 1434static int
1309lpfc_slave_configure(struct scsi_device *sdev) 1435lpfc_slave_configure(struct scsi_device *sdev)
1310{ 1436{
1311 struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata; 1437 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1312 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1438 struct lpfc_hba *phba = vport->phba;
1439 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1313 1440
1314 if (sdev->tagged_supported) 1441 if (sdev->tagged_supported)
1315 scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth); 1442 scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
@@ -1340,6 +1467,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
1340 return; 1467 return;
1341} 1468}
1342 1469
1470
1343struct scsi_host_template lpfc_template = { 1471struct scsi_host_template lpfc_template = {
1344 .module = THIS_MODULE, 1472 .module = THIS_MODULE,
1345 .name = LPFC_DRIVER_NAME, 1473 .name = LPFC_DRIVER_NAME,
@@ -1352,11 +1480,10 @@ struct scsi_host_template lpfc_template = {
1352 .slave_configure = lpfc_slave_configure, 1480 .slave_configure = lpfc_slave_configure,
1353 .slave_destroy = lpfc_slave_destroy, 1481 .slave_destroy = lpfc_slave_destroy,
1354 .scan_finished = lpfc_scan_finished, 1482 .scan_finished = lpfc_scan_finished,
1355 .scan_start = lpfc_scan_start,
1356 .this_id = -1, 1483 .this_id = -1,
1357 .sg_tablesize = LPFC_SG_SEG_CNT, 1484 .sg_tablesize = LPFC_SG_SEG_CNT,
1358 .cmd_per_lun = LPFC_CMD_PER_LUN, 1485 .cmd_per_lun = LPFC_CMD_PER_LUN,
1359 .use_clustering = ENABLE_CLUSTERING, 1486 .use_clustering = ENABLE_CLUSTERING,
1360 .shost_attrs = lpfc_host_attrs, 1487 .shost_attrs = lpfc_hba_attrs,
1361 .max_sectors = 0xFFFF, 1488 .max_sectors = 0xFFFF,
1362}; 1489};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index cdcd2535803f..31787bb6d53e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2005 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -110,7 +110,6 @@ struct fcp_cmnd {
110struct lpfc_scsi_buf { 110struct lpfc_scsi_buf {
111 struct list_head list; 111 struct list_head list;
112 struct scsi_cmnd *pCmd; 112 struct scsi_cmnd *pCmd;
113 struct lpfc_hba *scsi_hba;
114 struct lpfc_rport_data *rdata; 113 struct lpfc_rport_data *rdata;
115 114
116 uint32_t timeout; 115 uint32_t timeout;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a1e721459e2b..f4d5a6b00fde 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -38,23 +38,25 @@
38#include "lpfc_crtn.h" 38#include "lpfc_crtn.h"
39#include "lpfc_logmsg.h" 39#include "lpfc_logmsg.h"
40#include "lpfc_compat.h" 40#include "lpfc_compat.h"
41#include "lpfc_debugfs.h"
41 42
42/* 43/*
43 * Define macro to log: Mailbox command x%x cannot issue Data 44 * Define macro to log: Mailbox command x%x cannot issue Data
44 * This allows multiple uses of lpfc_msgBlk0311 45 * This allows multiple uses of lpfc_msgBlk0311
45 * w/o perturbing log msg utility. 46 * w/o perturbing log msg utility.
46 */ 47 */
47#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \ 48#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
48 lpfc_printf_log(phba, \ 49 lpfc_printf_log(phba, \
49 KERN_INFO, \ 50 KERN_INFO, \
50 LOG_MBOX | LOG_SLI, \ 51 LOG_MBOX | LOG_SLI, \
51 "%d:0311 Mailbox command x%x cannot issue " \ 52 "%d (%d):0311 Mailbox command x%x cannot " \
52 "Data: x%x x%x x%x\n", \ 53 "issue Data: x%x x%x x%x\n", \
53 phba->brd_no, \ 54 phba->brd_no, \
54 mb->mbxCommand, \ 55 pmbox->vport ? pmbox->vport->vpi : 0, \
55 phba->hba_state, \ 56 pmbox->mb.mbxCommand, \
57 phba->pport->port_state, \
56 psli->sli_flag, \ 58 psli->sli_flag, \
57 flag); 59 flag)
58 60
59 61
60/* There are only four IOCB completion types. */ 62/* There are only four IOCB completion types. */
@@ -65,8 +67,26 @@ typedef enum _lpfc_iocb_type {
65 LPFC_ABORT_IOCB 67 LPFC_ABORT_IOCB
66} lpfc_iocb_type; 68} lpfc_iocb_type;
67 69
68struct lpfc_iocbq * 70 /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer
69lpfc_sli_get_iocbq(struct lpfc_hba * phba) 71 * to the start of the ring, and the slot number of the
72 * desired iocb entry, calc a pointer to that entry.
73 */
74static inline IOCB_t *
75lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
76{
77 return (IOCB_t *) (((char *) pring->cmdringaddr) +
78 pring->cmdidx * phba->iocb_cmd_size);
79}
80
81static inline IOCB_t *
82lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
83{
84 return (IOCB_t *) (((char *) pring->rspringaddr) +
85 pring->rspidx * phba->iocb_rsp_size);
86}
87
88static struct lpfc_iocbq *
89__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
70{ 90{
71 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 91 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
72 struct lpfc_iocbq * iocbq = NULL; 92 struct lpfc_iocbq * iocbq = NULL;
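
The new lpfc_cmd_iocb()/lpfc_resp_iocb() helpers replace the fixed-size IOCB_ENTRY() arithmetic because SLI-3 ring entries are larger than SLI-2 entries; the slot address is simply ring base plus index times the per-HBA entry size. The same arithmetic in generic form (names are illustrative):

#include <linux/types.h>

/* Generic form of the slot lookup used by lpfc_cmd_iocb():
 * entry = base + index * entry_size.
 */
static inline void *example_ring_entry(void *ring_base, unsigned int idx,
				       size_t entry_size)
{
	return (char *)ring_base + idx * entry_size;
}
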
@@ -75,10 +95,22 @@ lpfc_sli_get_iocbq(struct lpfc_hba * phba)
75 return iocbq; 95 return iocbq;
76} 96}
77 97
98struct lpfc_iocbq *
99lpfc_sli_get_iocbq(struct lpfc_hba *phba)
100{
101 struct lpfc_iocbq * iocbq = NULL;
102 unsigned long iflags;
103
104 spin_lock_irqsave(&phba->hbalock, iflags);
105 iocbq = __lpfc_sli_get_iocbq(phba);
106 spin_unlock_irqrestore(&phba->hbalock, iflags);
107 return iocbq;
108}
109
78void 110void
79lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) 111__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
80{ 112{
81 size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb); 113 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
82 114
83 /* 115 /*
84 * Clean all volatile data fields, preserve iotag and node struct. 116 * Clean all volatile data fields, preserve iotag and node struct.
@@ -87,6 +119,19 @@ lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
87 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 119 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
88} 120}
89 121
122void
123lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
124{
125 unsigned long iflags;
126
127 /*
128 * Clean all volatile data fields, preserve iotag and node struct.
129 */
130 spin_lock_irqsave(&phba->hbalock, iflags);
131 __lpfc_sli_release_iocbq(phba, iocbq);
132 spin_unlock_irqrestore(&phba->hbalock, iflags);
133}
134
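
lpfc_sli_get_iocbq() and lpfc_sli_release_iocbq() are now thin wrappers that take phba->hbalock with spin_lock_irqsave() around lock-free __lpfc_* helpers; paths that already hold hbalock call the __ variants directly. A minimal sketch of that split, with a made-up pool type standing in for the iocbq free list:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_pool {
	spinlock_t	 lock;		/* plays the role of phba->hbalock */
	struct list_head free;
};

/* Caller must already hold pool->lock. */
static struct list_head *__example_get(struct example_pool *pool)
{
	struct list_head *entry = NULL;

	if (!list_empty(&pool->free)) {
		entry = pool->free.next;
		list_del(entry);
	}
	return entry;
}

/* Public wrapper: same operation, takes the lock itself. */
static struct list_head *example_get(struct example_pool *pool)
{
	struct list_head *entry;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	entry = __example_get(pool);
	spin_unlock_irqrestore(&pool->lock, flags);
	return entry;
}
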
90/* 135/*
91 * Translate the iocb command to an iocb command type used to decide the final 136 * Translate the iocb command to an iocb command type used to decide the final
92 * disposition of each completed IOCB. 137 * disposition of each completed IOCB.
@@ -155,6 +200,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
155 case CMD_RCV_ELS_REQ_CX: 200 case CMD_RCV_ELS_REQ_CX:
156 case CMD_RCV_SEQUENCE64_CX: 201 case CMD_RCV_SEQUENCE64_CX:
157 case CMD_RCV_ELS_REQ64_CX: 202 case CMD_RCV_ELS_REQ64_CX:
203 case CMD_IOCB_RCV_SEQ64_CX:
204 case CMD_IOCB_RCV_ELS64_CX:
205 case CMD_IOCB_RCV_CONT64_CX:
158 type = LPFC_UNSOL_IOCB; 206 type = LPFC_UNSOL_IOCB;
159 break; 207 break;
160 default: 208 default:
@@ -166,73 +214,77 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
166} 214}
167 215
168static int 216static int
169lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb) 217lpfc_sli_ring_map(struct lpfc_hba *phba)
170{ 218{
171 struct lpfc_sli *psli = &phba->sli; 219 struct lpfc_sli *psli = &phba->sli;
172 MAILBOX_t *pmbox = &pmb->mb; 220 LPFC_MBOXQ_t *pmb;
173 int i, rc; 221 MAILBOX_t *pmbox;
222 int i, rc, ret = 0;
174 223
224 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
225 if (!pmb)
226 return -ENOMEM;
227 pmbox = &pmb->mb;
228 phba->link_state = LPFC_INIT_MBX_CMDS;
175 for (i = 0; i < psli->num_rings; i++) { 229 for (i = 0; i < psli->num_rings; i++) {
176 phba->hba_state = LPFC_INIT_MBX_CMDS;
177 lpfc_config_ring(phba, i, pmb); 230 lpfc_config_ring(phba, i, pmb);
178 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 231 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
179 if (rc != MBX_SUCCESS) { 232 if (rc != MBX_SUCCESS) {
180 lpfc_printf_log(phba, 233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
181 KERN_ERR, 234 "%d:0446 Adapter failed to init (%d), "
182 LOG_INIT,
183 "%d:0446 Adapter failed to init, "
184 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 235 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
185 "ring %d\n", 236 "ring %d\n",
186 phba->brd_no, 237 phba->brd_no, rc,
187 pmbox->mbxCommand, 238 pmbox->mbxCommand,
188 pmbox->mbxStatus, 239 pmbox->mbxStatus,
189 i); 240 i);
190 phba->hba_state = LPFC_HBA_ERROR; 241 phba->link_state = LPFC_HBA_ERROR;
191 return -ENXIO; 242 ret = -ENXIO;
243 break;
192 } 244 }
193 } 245 }
194 return 0; 246 mempool_free(pmb, phba->mbox_mem_pool);
247 return ret;
195} 248}
196 249
197static int 250static int
198lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, 251lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
199 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) 252 struct lpfc_iocbq *piocb)
200{ 253{
201 list_add_tail(&piocb->list, &pring->txcmplq); 254 list_add_tail(&piocb->list, &pring->txcmplq);
202 pring->txcmplq_cnt++; 255 pring->txcmplq_cnt++;
203 if (unlikely(pring->ringno == LPFC_ELS_RING)) 256 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
204 mod_timer(&phba->els_tmofunc, 257 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
205 jiffies + HZ * (phba->fc_ratov << 1)); 258 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
259 if (!piocb->vport)
260 BUG();
261 else
262 mod_timer(&piocb->vport->els_tmofunc,
263 jiffies + HZ * (phba->fc_ratov << 1));
264 }
206 265
207 return (0); 266
267 return 0;
208} 268}
209 269
210static struct lpfc_iocbq * 270static struct lpfc_iocbq *
211lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) 271lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
212{ 272{
213 struct list_head *dlp;
214 struct lpfc_iocbq *cmd_iocb; 273 struct lpfc_iocbq *cmd_iocb;
215 274
216 dlp = &pring->txq; 275 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
217 cmd_iocb = NULL; 276 if (cmd_iocb != NULL)
218 list_remove_head((&pring->txq), cmd_iocb,
219 struct lpfc_iocbq,
220 list);
221 if (cmd_iocb) {
222 /* If the first ptr is not equal to the list header,
223 * deque the IOCBQ_t and return it.
224 */
225 pring->txq_cnt--; 277 pring->txq_cnt--;
226 } 278 return cmd_iocb;
227 return (cmd_iocb);
228} 279}
229 280
230static IOCB_t * 281static IOCB_t *
231lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 282lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
232{ 283{
233 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 284 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
285 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
286 &phba->slim2p->mbx.us.s2.port[pring->ringno];
234 uint32_t max_cmd_idx = pring->numCiocb; 287 uint32_t max_cmd_idx = pring->numCiocb;
235 IOCB_t *iocb = NULL;
236 288
237 if ((pring->next_cmdidx == pring->cmdidx) && 289 if ((pring->next_cmdidx == pring->cmdidx) &&
238 (++pring->next_cmdidx >= max_cmd_idx)) 290 (++pring->next_cmdidx >= max_cmd_idx))
@@ -249,15 +301,17 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
249 phba->brd_no, pring->ringno, 301 phba->brd_no, pring->ringno,
250 pring->local_getidx, max_cmd_idx); 302 pring->local_getidx, max_cmd_idx);
251 303
252 phba->hba_state = LPFC_HBA_ERROR; 304 phba->link_state = LPFC_HBA_ERROR;
253 /* 305 /*
254 * All error attention handlers are posted to 306 * All error attention handlers are posted to
255 * worker thread 307 * worker thread
256 */ 308 */
257 phba->work_ha |= HA_ERATT; 309 phba->work_ha |= HA_ERATT;
258 phba->work_hs = HS_FFER3; 310 phba->work_hs = HS_FFER3;
311
312 /* hbalock should already be held */
259 if (phba->work_wait) 313 if (phba->work_wait)
260 wake_up(phba->work_wait); 314 lpfc_worker_wake_up(phba);
261 315
262 return NULL; 316 return NULL;
263 } 317 }
@@ -266,39 +320,34 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
266 return NULL; 320 return NULL;
267 } 321 }
268 322
269 iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx); 323 return lpfc_cmd_iocb(phba, pring);
270
271 return iocb;
272} 324}
273 325
274uint16_t 326uint16_t
275lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) 327lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
276{ 328{
277 struct lpfc_iocbq ** new_arr; 329 struct lpfc_iocbq **new_arr;
278 struct lpfc_iocbq ** old_arr; 330 struct lpfc_iocbq **old_arr;
279 size_t new_len; 331 size_t new_len;
280 struct lpfc_sli *psli = &phba->sli; 332 struct lpfc_sli *psli = &phba->sli;
281 uint16_t iotag; 333 uint16_t iotag;
282 334
283 spin_lock_irq(phba->host->host_lock); 335 spin_lock_irq(&phba->hbalock);
284 iotag = psli->last_iotag; 336 iotag = psli->last_iotag;
285 if(++iotag < psli->iocbq_lookup_len) { 337 if(++iotag < psli->iocbq_lookup_len) {
286 psli->last_iotag = iotag; 338 psli->last_iotag = iotag;
287 psli->iocbq_lookup[iotag] = iocbq; 339 psli->iocbq_lookup[iotag] = iocbq;
288 spin_unlock_irq(phba->host->host_lock); 340 spin_unlock_irq(&phba->hbalock);
289 iocbq->iotag = iotag; 341 iocbq->iotag = iotag;
290 return iotag; 342 return iotag;
291 } 343 } else if (psli->iocbq_lookup_len < (0xffff
292 else if (psli->iocbq_lookup_len < (0xffff
293 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 344 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
294 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 345 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
295 spin_unlock_irq(phba->host->host_lock); 346 spin_unlock_irq(&phba->hbalock);
296 new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *), 347 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
297 GFP_KERNEL); 348 GFP_KERNEL);
298 if (new_arr) { 349 if (new_arr) {
299 memset((char *)new_arr, 0, 350 spin_lock_irq(&phba->hbalock);
300 new_len * sizeof (struct lpfc_iocbq *));
301 spin_lock_irq(phba->host->host_lock);
302 old_arr = psli->iocbq_lookup; 351 old_arr = psli->iocbq_lookup;
303 if (new_len <= psli->iocbq_lookup_len) { 352 if (new_len <= psli->iocbq_lookup_len) {
304 /* highly unprobable case */ 353 /* highly unprobable case */
@@ -307,11 +356,11 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
307 if(++iotag < psli->iocbq_lookup_len) { 356 if(++iotag < psli->iocbq_lookup_len) {
308 psli->last_iotag = iotag; 357 psli->last_iotag = iotag;
309 psli->iocbq_lookup[iotag] = iocbq; 358 psli->iocbq_lookup[iotag] = iocbq;
310 spin_unlock_irq(phba->host->host_lock); 359 spin_unlock_irq(&phba->hbalock);
311 iocbq->iotag = iotag; 360 iocbq->iotag = iotag;
312 return iotag; 361 return iotag;
313 } 362 }
314 spin_unlock_irq(phba->host->host_lock); 363 spin_unlock_irq(&phba->hbalock);
315 return 0; 364 return 0;
316 } 365 }
317 if (psli->iocbq_lookup) 366 if (psli->iocbq_lookup)
@@ -322,13 +371,13 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
322 psli->iocbq_lookup_len = new_len; 371 psli->iocbq_lookup_len = new_len;
323 psli->last_iotag = iotag; 372 psli->last_iotag = iotag;
324 psli->iocbq_lookup[iotag] = iocbq; 373 psli->iocbq_lookup[iotag] = iocbq;
325 spin_unlock_irq(phba->host->host_lock); 374 spin_unlock_irq(&phba->hbalock);
326 iocbq->iotag = iotag; 375 iocbq->iotag = iotag;
327 kfree(old_arr); 376 kfree(old_arr);
328 return iotag; 377 return iotag;
329 } 378 }
330 } else 379 } else
331 spin_unlock_irq(phba->host->host_lock); 380 spin_unlock_irq(&phba->hbalock);
332 381
333 lpfc_printf_log(phba, KERN_ERR,LOG_SLI, 382 lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
334 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", 383 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -349,7 +398,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
349 /* 398 /*
350 * Issue iocb command to adapter 399 * Issue iocb command to adapter
351 */ 400 */
352 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t)); 401 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
353 wmb(); 402 wmb();
354 pring->stats.iocb_cmd++; 403 pring->stats.iocb_cmd++;
355 404
@@ -361,20 +410,18 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
361 if (nextiocb->iocb_cmpl) 410 if (nextiocb->iocb_cmpl)
362 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 411 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
363 else 412 else
364 lpfc_sli_release_iocbq(phba, nextiocb); 413 __lpfc_sli_release_iocbq(phba, nextiocb);
365 414
366 /* 415 /*
367 * Let the HBA know what IOCB slot will be the next one the 416 * Let the HBA know what IOCB slot will be the next one the
368 * driver will put a command into. 417 * driver will put a command into.
369 */ 418 */
370 pring->cmdidx = pring->next_cmdidx; 419 pring->cmdidx = pring->next_cmdidx;
371 writel(pring->cmdidx, phba->MBslimaddr 420 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
372 + (SLIMOFF + (pring->ringno * 2)) * 4);
373} 421}
374 422
375static void 423static void
376lpfc_sli_update_full_ring(struct lpfc_hba * phba, 424lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
377 struct lpfc_sli_ring *pring)
378{ 425{
379 int ringno = pring->ringno; 426 int ringno = pring->ringno;
380 427
@@ -393,8 +440,7 @@ lpfc_sli_update_full_ring(struct lpfc_hba * phba,
393} 440}
394 441
395static void 442static void
396lpfc_sli_update_ring(struct lpfc_hba * phba, 443lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
397 struct lpfc_sli_ring *pring)
398{ 444{
399 int ringno = pring->ringno; 445 int ringno = pring->ringno;
400 446
@@ -407,7 +453,7 @@ lpfc_sli_update_ring(struct lpfc_hba * phba,
407} 453}
408 454
409static void 455static void
410lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) 456lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
411{ 457{
412 IOCB_t *iocb; 458 IOCB_t *iocb;
413 struct lpfc_iocbq *nextiocb; 459 struct lpfc_iocbq *nextiocb;
@@ -420,7 +466,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
420 * (d) IOCB processing is not blocked by the outstanding mbox command. 466 * (d) IOCB processing is not blocked by the outstanding mbox command.
421 */ 467 */
422 if (pring->txq_cnt && 468 if (pring->txq_cnt &&
423 (phba->hba_state > LPFC_LINK_DOWN) && 469 lpfc_is_link_up(phba) &&
424 (pring->ringno != phba->sli.fcp_ring || 470 (pring->ringno != phba->sli.fcp_ring ||
425 phba->sli.sli_flag & LPFC_PROCESS_LA) && 471 phba->sli.sli_flag & LPFC_PROCESS_LA) &&
426 !(pring->flag & LPFC_STOP_IOCB_MBX)) { 472 !(pring->flag & LPFC_STOP_IOCB_MBX)) {
@@ -440,11 +486,15 @@ lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
440 486
441/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */ 487/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
442static void 488static void
443lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno) 489lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
444{ 490{
445 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno]; 491 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
492 &phba->slim2p->mbx.us.s3_pgp.port[ringno] :
493 &phba->slim2p->mbx.us.s2.port[ringno];
494 unsigned long iflags;
446 495
447 /* If the ring is active, flag it */ 496 /* If the ring is active, flag it */
497 spin_lock_irqsave(&phba->hbalock, iflags);
448 if (phba->sli.ring[ringno].cmdringaddr) { 498 if (phba->sli.ring[ringno].cmdringaddr) {
449 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) { 499 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
450 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX; 500 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
@@ -453,11 +503,176 @@ lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
453 */ 503 */
454 phba->sli.ring[ringno].local_getidx 504 phba->sli.ring[ringno].local_getidx
455 = le32_to_cpu(pgp->cmdGetInx); 505 = le32_to_cpu(pgp->cmdGetInx);
456 spin_lock_irq(phba->host->host_lock);
457 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]); 506 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
458 spin_unlock_irq(phba->host->host_lock);
459 } 507 }
460 } 508 }
509 spin_unlock_irqrestore(&phba->hbalock, iflags);
510}
511
512struct lpfc_hbq_entry *
513lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
514{
515 struct hbq_s *hbqp = &phba->hbqs[hbqno];
516
517 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
518 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
519 hbqp->next_hbqPutIdx = 0;
520
521 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
522 uint32_t raw_index = phba->hbq_get[hbqno];
523 uint32_t getidx = le32_to_cpu(raw_index);
524
525 hbqp->local_hbqGetIdx = getidx;
526
527 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
528 lpfc_printf_log(phba, KERN_ERR,
529 LOG_SLI | LOG_VPORT,
530 "%d:1802 HBQ %d: local_hbqGetIdx "
531 "%u is greater than hbqp->entry_count %u\n",
532 phba->brd_no, hbqno,
533 hbqp->local_hbqGetIdx,
534 hbqp->entry_count);
535
536 phba->link_state = LPFC_HBA_ERROR;
537 return NULL;
538 }
539
540 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
541 return NULL;
542 }
543
544 return (struct lpfc_hbq_entry *) phba->hbqslimp.virt + hbqp->hbqPutIdx;
545}
546
547void
548lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
549{
550 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
551 struct hbq_dmabuf *hbq_buf;
552
553 /* Return all memory used by all HBQs */
554 list_for_each_entry_safe(dmabuf, next_dmabuf,
555 &phba->hbq_buffer_list, list) {
556 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
557 list_del(&hbq_buf->dbuf.list);
558 lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
559 kfree(hbq_buf);
560 }
561}
562
563static void
564lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
565 struct hbq_dmabuf *hbq_buf)
566{
567 struct lpfc_hbq_entry *hbqe;
568 dma_addr_t physaddr = hbq_buf->dbuf.phys;
569
570 /* Get next HBQ entry slot to use */
571 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
572 if (hbqe) {
573 struct hbq_s *hbqp = &phba->hbqs[hbqno];
574
575 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
576 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
577 hbqe->bde.tus.f.bdeSize = FCELSSIZE;
578 hbqe->bde.tus.f.bdeFlags = 0;
579 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
580 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
581 /* Sync SLIM */
582 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
583 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
584 /* flush */
585 readl(phba->hbq_put + hbqno);
586 list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
587 }
588}
589
590static struct lpfc_hbq_init lpfc_els_hbq = {
591 .rn = 1,
592 .entry_count = 200,
593 .mask_count = 0,
594 .profile = 0,
595 .ring_mask = 1 << LPFC_ELS_RING,
596 .buffer_count = 0,
597 .init_count = 20,
598 .add_count = 5,
599};
600
601static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
602 &lpfc_els_hbq,
603};
604
605int
606lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
607{
608 uint32_t i, start, end;
609 struct hbq_dmabuf *hbq_buffer;
610
611 start = lpfc_hbq_defs[hbqno]->buffer_count;
612 end = count + lpfc_hbq_defs[hbqno]->buffer_count;
613 if (end > lpfc_hbq_defs[hbqno]->entry_count) {
614 end = lpfc_hbq_defs[hbqno]->entry_count;
615 }
616
617 /* Populate HBQ entries */
618 for (i = start; i < end; i++) {
619 hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
620 GFP_KERNEL);
621 if (!hbq_buffer)
622 return 1;
623 hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
624 &hbq_buffer->dbuf.phys);
625 if (hbq_buffer->dbuf.virt == NULL)
626 return 1;
627 hbq_buffer->tag = (i | (hbqno << 16));
628 lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
629 lpfc_hbq_defs[hbqno]->buffer_count++;
630 }
631 return 0;
632}
633
634int
635lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
636{
637 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
638 lpfc_hbq_defs[qno]->add_count));
639}
640
641int
642lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
643{
644 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
645 lpfc_hbq_defs[qno]->init_count));
646}
647
648struct hbq_dmabuf *
649lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
650{
651 struct lpfc_dmabuf *d_buf;
652 struct hbq_dmabuf *hbq_buf;
653
654 list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
655 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
656 if ((hbq_buf->tag & 0xffff) == tag) {
657 return hbq_buf;
658 }
659 }
660 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
661 "%d:1803 Bad hbq tag. Data: x%x x%x\n",
662 phba->brd_no, tag,
663 lpfc_hbq_defs[tag >> 16]->buffer_count);
664 return NULL;
665}
666
667void
668lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *sp)
669{
670 uint32_t hbqno;
671
672 if (sp) {
673 hbqno = sp->tag >> 16;
674 lpfc_sli_hbq_to_firmware(phba, hbqno, sp);
675 }
461} 676}
462 677
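The HBQ helpers above stamp every posted receive buffer with a tag that packs the HBQ number into the upper 16 bits and the buffer index into the lower 16 bits; lpfc_sli_hbqbuf_find masks off the low half to match a completion, and lpfc_sli_free_hbq shifts out the high half to pick the queue to repost to. A small self-contained sketch of that packing, with plain helper names rather than the driver's open-coded expressions:

#include <assert.h>
#include <stdint.h>

/* Illustrative helpers only; the driver open-codes these expressions. */
static uint32_t hbq_tag_make(uint32_t hbqno, uint32_t index)
{
	return index | (hbqno << 16);	/* as in: buffer->tag = (i | (hbqno << 16)) */
}

static uint32_t hbq_tag_hbqno(uint32_t tag) { return tag >> 16; }
static uint32_t hbq_tag_index(uint32_t tag) { return tag & 0xffff; }

int main(void)
{
	uint32_t tag = hbq_tag_make(0, 17);	/* HBQ 0 (ELS ring), buffer index 17 */

	assert(hbq_tag_hbqno(tag) == 0);
	assert(hbq_tag_index(tag) == 17);
	return 0;
}
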
463static int 678static int
@@ -511,32 +726,38 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
511 case MBX_FLASH_WR_ULA: 726 case MBX_FLASH_WR_ULA:
512 case MBX_SET_DEBUG: 727 case MBX_SET_DEBUG:
513 case MBX_LOAD_EXP_ROM: 728 case MBX_LOAD_EXP_ROM:
729 case MBX_REG_VPI:
730 case MBX_UNREG_VPI:
731 case MBX_HEARTBEAT:
514 ret = mbxCommand; 732 ret = mbxCommand;
515 break; 733 break;
516 default: 734 default:
517 ret = MBX_SHUTDOWN; 735 ret = MBX_SHUTDOWN;
518 break; 736 break;
519 } 737 }
520 return (ret); 738 return ret;
521} 739}
522static void 740static void
523lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 741lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
524{ 742{
525 wait_queue_head_t *pdone_q; 743 wait_queue_head_t *pdone_q;
744 unsigned long drvr_flag;
526 745
527 /* 746 /*
528 * If pdone_q is empty, the driver thread gave up waiting and 747 * If pdone_q is empty, the driver thread gave up waiting and
529 * continued running. 748 * continued running.
530 */ 749 */
531 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 750 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
751 spin_lock_irqsave(&phba->hbalock, drvr_flag);
532 pdone_q = (wait_queue_head_t *) pmboxq->context1; 752 pdone_q = (wait_queue_head_t *) pmboxq->context1;
533 if (pdone_q) 753 if (pdone_q)
534 wake_up_interruptible(pdone_q); 754 wake_up_interruptible(pdone_q);
755 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
535 return; 756 return;
536} 757}
537 758
538void 759void
539lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 760lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
540{ 761{
541 struct lpfc_dmabuf *mp; 762 struct lpfc_dmabuf *mp;
542 uint16_t rpi; 763 uint16_t rpi;
@@ -553,79 +774,64 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
553 * If a REG_LOGIN succeeded after node is destroyed or node 774 * If a REG_LOGIN succeeded after node is destroyed or node
554 * is in re-discovery driver need to cleanup the RPI. 775 * is in re-discovery driver need to cleanup the RPI.
555 */ 776 */
556 if (!(phba->fc_flag & FC_UNLOADING) && 777 if (!(phba->pport->load_flag & FC_UNLOADING) &&
557 (pmb->mb.mbxCommand == MBX_REG_LOGIN64) && 778 pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
558 (!pmb->mb.mbxStatus)) { 779 !pmb->mb.mbxStatus) {
559 780
560 rpi = pmb->mb.un.varWords[0]; 781 rpi = pmb->mb.un.varWords[0];
561 lpfc_unreg_login(phba, rpi, pmb); 782 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
562 pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 783 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
563 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 784 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
564 if (rc != MBX_NOT_FINISHED) 785 if (rc != MBX_NOT_FINISHED)
565 return; 786 return;
566 } 787 }
567 788
568 mempool_free( pmb, phba->mbox_mem_pool); 789 mempool_free(pmb, phba->mbox_mem_pool);
569 return; 790 return;
570} 791}
571 792
572int 793int
573lpfc_sli_handle_mb_event(struct lpfc_hba * phba) 794lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
574{ 795{
575 MAILBOX_t *mbox;
576 MAILBOX_t *pmbox; 796 MAILBOX_t *pmbox;
577 LPFC_MBOXQ_t *pmb; 797 LPFC_MBOXQ_t *pmb;
578 struct lpfc_sli *psli; 798 int rc;
579 int i, rc; 799 LIST_HEAD(cmplq);
580 uint32_t process_next;
581
582 psli = &phba->sli;
583 /* We should only get here if we are in SLI2 mode */
584 if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
585 return (1);
586 }
587 800
588 phba->sli.slistat.mbox_event++; 801 phba->sli.slistat.mbox_event++;
589 802
803 /* Get all completed mailbox buffers into the cmplq */
804 spin_lock_irq(&phba->hbalock);
805 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
806 spin_unlock_irq(&phba->hbalock);
807
590 /* Get a Mailbox buffer to setup mailbox commands for callback */ 808 /* Get a Mailbox buffer to setup mailbox commands for callback */
591 if ((pmb = phba->sli.mbox_active)) { 809 do {
592 pmbox = &pmb->mb; 810 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
593 mbox = &phba->slim2p->mbx; 811 if (pmb == NULL)
812 break;
594 813
595 /* First check out the status word */ 814 pmbox = &pmb->mb;
596 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
597 815
598 /* Sanity check to ensure the host owns the mailbox */ 816 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
599 if (pmbox->mbxOwner != OWN_HOST) { 817 if (pmb->vport) {
600 /* Lets try for a while */ 818 lpfc_debugfs_disc_trc(pmb->vport,
601 for (i = 0; i < 10240; i++) { 819 LPFC_DISC_TRC_MBOX_VPORT,
602 /* First copy command data */ 820 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
603 lpfc_sli_pcimem_bcopy(mbox, pmbox, 821 (uint32_t)pmbox->mbxCommand,
604 sizeof (uint32_t)); 822 pmbox->un.varWords[0],
605 if (pmbox->mbxOwner == OWN_HOST) 823 pmbox->un.varWords[1]);
606 goto mbout; 824 }
825 else {
826 lpfc_debugfs_disc_trc(phba->pport,
827 LPFC_DISC_TRC_MBOX,
828 "MBOX cmpl: cmd:x%x mb:x%x x%x",
829 (uint32_t)pmbox->mbxCommand,
830 pmbox->un.varWords[0],
831 pmbox->un.varWords[1]);
607 } 832 }
608 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
609 <status> */
610 lpfc_printf_log(phba,
611 KERN_WARNING,
612 LOG_MBOX | LOG_SLI,
613 "%d:0304 Stray Mailbox Interrupt "
614 "mbxCommand x%x mbxStatus x%x\n",
615 phba->brd_no,
616 pmbox->mbxCommand,
617 pmbox->mbxStatus);
618
619 spin_lock_irq(phba->host->host_lock);
620 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
621 spin_unlock_irq(phba->host->host_lock);
622 return (1);
623 } 833 }
624 834
625 mbout:
626 del_timer_sync(&phba->sli.mbox_tmo);
627 phba->work_hba_events &= ~WORKER_MBOX_TMO;
628
629 /* 835 /*
630 * It is a fatal error if unknown mbox command completion. 836 * It is a fatal error if unknown mbox command completion.
631 */ 837 */
@@ -633,51 +839,50 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
633 MBX_SHUTDOWN) { 839 MBX_SHUTDOWN) {
634 840
635 /* Unknow mailbox command compl */ 841 /* Unknow mailbox command compl */
636 lpfc_printf_log(phba, 842 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
637 KERN_ERR, 843 "%d (%d):0323 Unknown Mailbox command "
638 LOG_MBOX | LOG_SLI, 844 "%x Cmpl\n",
639 "%d:0323 Unknown Mailbox command %x Cmpl\n", 845 phba->brd_no,
640 phba->brd_no, 846 pmb->vport ? pmb->vport->vpi : 0,
641 pmbox->mbxCommand); 847 pmbox->mbxCommand);
642 phba->hba_state = LPFC_HBA_ERROR; 848 phba->link_state = LPFC_HBA_ERROR;
643 phba->work_hs = HS_FFER3; 849 phba->work_hs = HS_FFER3;
644 lpfc_handle_eratt(phba); 850 lpfc_handle_eratt(phba);
645 return (0); 851 continue;
646 } 852 }
647 853
648 phba->sli.mbox_active = NULL;
649 if (pmbox->mbxStatus) { 854 if (pmbox->mbxStatus) {
650 phba->sli.slistat.mbox_stat_err++; 855 phba->sli.slistat.mbox_stat_err++;
651 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 856 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
652 /* Mbox cmd cmpl error - RETRYing */ 857 /* Mbox cmd cmpl error - RETRYing */
653 lpfc_printf_log(phba, 858 lpfc_printf_log(phba, KERN_INFO,
654 KERN_INFO, 859 LOG_MBOX | LOG_SLI,
655 LOG_MBOX | LOG_SLI, 860 "%d (%d):0305 Mbox cmd cmpl "
656 "%d:0305 Mbox cmd cmpl error - " 861 "error - RETRYing Data: x%x "
657 "RETRYing Data: x%x x%x x%x x%x\n", 862 "x%x x%x x%x\n",
658 phba->brd_no, 863 phba->brd_no,
659 pmbox->mbxCommand, 864 pmb->vport ? pmb->vport->vpi :0,
660 pmbox->mbxStatus, 865 pmbox->mbxCommand,
661 pmbox->un.varWords[0], 866 pmbox->mbxStatus,
662 phba->hba_state); 867 pmbox->un.varWords[0],
868 pmb->vport->port_state);
663 pmbox->mbxStatus = 0; 869 pmbox->mbxStatus = 0;
664 pmbox->mbxOwner = OWN_HOST; 870 pmbox->mbxOwner = OWN_HOST;
665 spin_lock_irq(phba->host->host_lock); 871 spin_lock_irq(&phba->hbalock);
666 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 872 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
667 spin_unlock_irq(phba->host->host_lock); 873 spin_unlock_irq(&phba->hbalock);
668 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 874 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
669 if (rc == MBX_SUCCESS) 875 if (rc == MBX_SUCCESS)
670 return (0); 876 continue;
671 } 877 }
672 } 878 }
673 879
674 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 880 /* Mailbox cmd <cmd> Cmpl <cmpl> */
675 lpfc_printf_log(phba, 881 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
676 KERN_INFO, 882 "%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
677 LOG_MBOX | LOG_SLI,
678 "%d:0307 Mailbox cmd x%x Cmpl x%p "
679 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 883 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
680 phba->brd_no, 884 phba->brd_no,
885 pmb->vport ? pmb->vport->vpi : 0,
681 pmbox->mbxCommand, 886 pmbox->mbxCommand,
682 pmb->mbox_cmpl, 887 pmb->mbox_cmpl,
683 *((uint32_t *) pmbox), 888 *((uint32_t *) pmbox),
@@ -690,39 +895,35 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
690 pmbox->un.varWords[6], 895 pmbox->un.varWords[6],
691 pmbox->un.varWords[7]); 896 pmbox->un.varWords[7]);
692 897
693 if (pmb->mbox_cmpl) { 898 if (pmb->mbox_cmpl)
694 lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
695 pmb->mbox_cmpl(phba,pmb); 899 pmb->mbox_cmpl(phba,pmb);
696 } 900 } while (1);
697 } 901 return 0;
698 902}
699
700 do {
701 process_next = 0; /* by default don't loop */
702 spin_lock_irq(phba->host->host_lock);
703 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
704
705 /* Process next mailbox command if there is one */
706 if ((pmb = lpfc_mbox_get(phba))) {
707 spin_unlock_irq(phba->host->host_lock);
708 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
709 if (rc == MBX_NOT_FINISHED) {
710 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
711 pmb->mbox_cmpl(phba,pmb);
712 process_next = 1;
713 continue; /* loop back */
714 }
715 } else {
716 spin_unlock_irq(phba->host->host_lock);
717 /* Turn on IOCB processing */
718 for (i = 0; i < phba->sli.num_rings; i++)
719 lpfc_sli_turn_on_ring(phba, i);
720 }
721
722 } while (process_next);
723 903
724 return (0); 904static struct lpfc_dmabuf *
905lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
906{
907 struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
908
909 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
910 if (hbq_entry == NULL)
911 return NULL;
912 list_del(&hbq_entry->dbuf.list);
913 new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
914 if (new_hbq_entry == NULL)
915 return &hbq_entry->dbuf;
916 new_hbq_entry->dbuf = hbq_entry->dbuf;
917 new_hbq_entry->tag = -1;
918 hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
919 if (hbq_entry->dbuf.virt == NULL) {
920 kfree(new_hbq_entry);
921 return &hbq_entry->dbuf;
922 }
923 lpfc_sli_free_hbq(phba, hbq_entry);
924 return &new_hbq_entry->dbuf;
725} 925}
926
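lpfc_sli_replace_hbqbuff above hands the DMA buffer the firmware just filled up to the caller and posts a freshly allocated buffer back into the HBQ, recycling the original instead when an allocation fails. A rough, self-contained model of that swap-or-recycle decision, using ordinary malloc and an invented repost helper in place of lpfc_hbq_alloc and the real reposting path:

#include <stdlib.h>

struct rx_buf {
	void *virt;	/* memory the adapter DMAs received frames into */
	int   tag;	/* HBQ number / index cookie                     */
};

/* Stand-in for reposting a buffer descriptor to the hardware queue. */
static void repost_to_hbq(struct rx_buf *buf) { (void)buf; }

/* Return the buffer the upper layer should consume. */
static struct rx_buf *replace_rx_buf(struct rx_buf *filled, size_t bufsize)
{
	struct rx_buf *fresh = malloc(sizeof(*fresh));
	void *new_mem;

	if (!fresh)
		return filled;		/* no memory: consume the original, post nothing */

	new_mem = malloc(bufsize);	/* models lpfc_hbq_alloc() */
	if (!new_mem) {
		free(fresh);
		return filled;		/* cannot replenish: recycle the original */
	}

	*fresh = *filled;		/* fresh now carries the received data upward */
	fresh->tag = -1;		/* detached from any HBQ slot */
	filled->virt = new_mem;		/* original descriptor gets brand-new memory */
	repost_to_hbq(filled);		/* and goes back onto the hardware queue */
	return fresh;
}
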
726static int 927static int
727lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 928lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
728 struct lpfc_iocbq *saveq) 929 struct lpfc_iocbq *saveq)
@@ -735,7 +936,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
735 match = 0; 936 match = 0;
736 irsp = &(saveq->iocb); 937 irsp = &(saveq->iocb);
737 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) 938 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
738 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) { 939 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
940 || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
941 || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
739 Rctl = FC_ELS_REQ; 942 Rctl = FC_ELS_REQ;
740 Type = FC_ELS_DATA; 943 Type = FC_ELS_DATA;
741 } else { 944 } else {
@@ -747,13 +950,24 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
747 950
748 /* Firmware Workaround */ 951 /* Firmware Workaround */
749 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 952 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
750 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) { 953 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
954 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
751 Rctl = FC_ELS_REQ; 955 Rctl = FC_ELS_REQ;
752 Type = FC_ELS_DATA; 956 Type = FC_ELS_DATA;
753 w5p->hcsw.Rctl = Rctl; 957 w5p->hcsw.Rctl = Rctl;
754 w5p->hcsw.Type = Type; 958 w5p->hcsw.Type = Type;
755 } 959 }
756 } 960 }
961
962 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
963 if (irsp->ulpBdeCount != 0)
964 saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
965 irsp->un.ulpWord[3]);
966 if (irsp->ulpBdeCount == 2)
967 saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
968 irsp->un.ulpWord[15]);
969 }
970
757 /* unSolicited Responses */ 971 /* unSolicited Responses */
758 if (pring->prt[0].profile) { 972 if (pring->prt[0].profile) {
759 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 973 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
@@ -781,23 +995,21 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
781 /* Unexpected Rctl / Type received */ 995 /* Unexpected Rctl / Type received */
782 /* Ring <ringno> handler: unexpected 996 /* Ring <ringno> handler: unexpected
783 Rctl <Rctl> Type <Type> received */ 997 Rctl <Rctl> Type <Type> received */
784 lpfc_printf_log(phba, 998 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
785 KERN_WARNING,
786 LOG_SLI,
787 "%d:0313 Ring %d handler: unexpected Rctl x%x " 999 "%d:0313 Ring %d handler: unexpected Rctl x%x "
788 "Type x%x received \n", 1000 "Type x%x received\n",
789 phba->brd_no, 1001 phba->brd_no,
790 pring->ringno, 1002 pring->ringno,
791 Rctl, 1003 Rctl,
792 Type); 1004 Type);
793 } 1005 }
794 return(1); 1006 return 1;
795} 1007}
796 1008
797static struct lpfc_iocbq * 1009static struct lpfc_iocbq *
798lpfc_sli_iocbq_lookup(struct lpfc_hba * phba, 1010lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
799 struct lpfc_sli_ring * pring, 1011 struct lpfc_sli_ring *pring,
800 struct lpfc_iocbq * prspiocb) 1012 struct lpfc_iocbq *prspiocb)
801{ 1013{
802 struct lpfc_iocbq *cmd_iocb = NULL; 1014 struct lpfc_iocbq *cmd_iocb = NULL;
803 uint16_t iotag; 1015 uint16_t iotag;
@@ -806,7 +1018,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
806 1018
807 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 1019 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
808 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 1020 cmd_iocb = phba->sli.iocbq_lookup[iotag];
809 list_del(&cmd_iocb->list); 1021 list_del_init(&cmd_iocb->list);
810 pring->txcmplq_cnt--; 1022 pring->txcmplq_cnt--;
811 return cmd_iocb; 1023 return cmd_iocb;
812 } 1024 }
@@ -821,16 +1033,18 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
821} 1033}
822 1034
823static int 1035static int
824lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 1036lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
825 struct lpfc_iocbq *saveq) 1037 struct lpfc_iocbq *saveq)
826{ 1038{
827 struct lpfc_iocbq * cmdiocbp; 1039 struct lpfc_iocbq *cmdiocbp;
828 int rc = 1; 1040 int rc = 1;
829 unsigned long iflag; 1041 unsigned long iflag;
830 1042
831 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 1043 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
832 spin_lock_irqsave(phba->host->host_lock, iflag); 1044 spin_lock_irqsave(&phba->hbalock, iflag);
833 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 1045 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
1046 spin_unlock_irqrestore(&phba->hbalock, iflag);
1047
834 if (cmdiocbp) { 1048 if (cmdiocbp) {
835 if (cmdiocbp->iocb_cmpl) { 1049 if (cmdiocbp->iocb_cmpl) {
836 /* 1050 /*
@@ -846,17 +1060,8 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
846 saveq->iocb.un.ulpWord[4] = 1060 saveq->iocb.un.ulpWord[4] =
847 IOERR_SLI_ABORTED; 1061 IOERR_SLI_ABORTED;
848 } 1062 }
849 spin_unlock_irqrestore(phba->host->host_lock,
850 iflag);
851 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
852 spin_lock_irqsave(phba->host->host_lock, iflag);
853 }
854 else {
855 spin_unlock_irqrestore(phba->host->host_lock,
856 iflag);
857 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
858 spin_lock_irqsave(phba->host->host_lock, iflag);
859 } 1063 }
1064 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
860 } else 1065 } else
861 lpfc_sli_release_iocbq(phba, cmdiocbp); 1066 lpfc_sli_release_iocbq(phba, cmdiocbp);
862 } else { 1067 } else {
@@ -870,29 +1075,30 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
870 * Ring <ringno> handler: unexpected completion IoTag 1075 * Ring <ringno> handler: unexpected completion IoTag
871 * <IoTag> 1076 * <IoTag>
872 */ 1077 */
873 lpfc_printf_log(phba, 1078 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
874 KERN_WARNING, 1079 "%d (%d):0322 Ring %d handler: "
875 LOG_SLI, 1080 "unexpected completion IoTag x%x "
876 "%d:0322 Ring %d handler: unexpected " 1081 "Data: x%x x%x x%x x%x\n",
877 "completion IoTag x%x Data: x%x x%x x%x x%x\n", 1082 phba->brd_no,
878 phba->brd_no, 1083 cmdiocbp->vport->vpi,
879 pring->ringno, 1084 pring->ringno,
880 saveq->iocb.ulpIoTag, 1085 saveq->iocb.ulpIoTag,
881 saveq->iocb.ulpStatus, 1086 saveq->iocb.ulpStatus,
882 saveq->iocb.un.ulpWord[4], 1087 saveq->iocb.un.ulpWord[4],
883 saveq->iocb.ulpCommand, 1088 saveq->iocb.ulpCommand,
884 saveq->iocb.ulpContext); 1089 saveq->iocb.ulpContext);
885 } 1090 }
886 } 1091 }
887 1092
888 spin_unlock_irqrestore(phba->host->host_lock, iflag);
889 return rc; 1093 return rc;
890} 1094}
891 1095
892static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba, 1096static void
893 struct lpfc_sli_ring * pring) 1097lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
894{ 1098{
895 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1099 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1100 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1101 &phba->slim2p->mbx.us.s2.port[pring->ringno];
896 /* 1102 /*
897 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then 1103 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
898 * rsp ring <portRspMax> 1104 * rsp ring <portRspMax>
@@ -904,7 +1110,7 @@ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
904 le32_to_cpu(pgp->rspPutInx), 1110 le32_to_cpu(pgp->rspPutInx),
905 pring->numRiocb); 1111 pring->numRiocb);
906 1112
907 phba->hba_state = LPFC_HBA_ERROR; 1113 phba->link_state = LPFC_HBA_ERROR;
908 1114
909 /* 1115 /*
910 * All error attention handlers are posted to 1116 * All error attention handlers are posted to
@@ -912,16 +1118,18 @@ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
912 */ 1118 */
913 phba->work_ha |= HA_ERATT; 1119 phba->work_ha |= HA_ERATT;
914 phba->work_hs = HS_FFER3; 1120 phba->work_hs = HS_FFER3;
1121
1122 /* hbalock should already be held */
915 if (phba->work_wait) 1123 if (phba->work_wait)
916 wake_up(phba->work_wait); 1124 lpfc_worker_wake_up(phba);
917 1125
918 return; 1126 return;
919} 1127}
920 1128
921void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) 1129void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
922{ 1130{
923 struct lpfc_sli * psli = &phba->sli; 1131 struct lpfc_sli *psli = &phba->sli;
924 struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING]; 1132 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
925 IOCB_t *irsp = NULL; 1133 IOCB_t *irsp = NULL;
926 IOCB_t *entry = NULL; 1134 IOCB_t *entry = NULL;
927 struct lpfc_iocbq *cmdiocbq = NULL; 1135 struct lpfc_iocbq *cmdiocbq = NULL;
@@ -931,13 +1139,15 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
931 uint32_t portRspPut, portRspMax; 1139 uint32_t portRspPut, portRspMax;
932 int type; 1140 int type;
933 uint32_t rsp_cmpl = 0; 1141 uint32_t rsp_cmpl = 0;
934 void __iomem *to_slim;
935 uint32_t ha_copy; 1142 uint32_t ha_copy;
1143 unsigned long iflags;
936 1144
937 pring->stats.iocb_event++; 1145 pring->stats.iocb_event++;
938 1146
939 /* The driver assumes SLI-2 mode */ 1147 pgp = (phba->sli_rev == 3) ?
940 pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1148 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1149 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1150
941 1151
942 /* 1152 /*
943 * The next available response entry should never exceed the maximum 1153 * The next available response entry should never exceed the maximum
@@ -952,15 +1162,13 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
952 1162
953 rmb(); 1163 rmb();
954 while (pring->rspidx != portRspPut) { 1164 while (pring->rspidx != portRspPut) {
955 1165 entry = lpfc_resp_iocb(phba, pring);
956 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
957
958 if (++pring->rspidx >= portRspMax) 1166 if (++pring->rspidx >= portRspMax)
959 pring->rspidx = 0; 1167 pring->rspidx = 0;
960 1168
961 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1169 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
962 (uint32_t *) &rspiocbq.iocb, 1170 (uint32_t *) &rspiocbq.iocb,
963 sizeof (IOCB_t)); 1171 phba->iocb_rsp_size);
964 irsp = &rspiocbq.iocb; 1172 irsp = &rspiocbq.iocb;
965 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1173 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
966 pring->stats.iocb_rsp++; 1174 pring->stats.iocb_rsp++;
@@ -998,8 +1206,10 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
998 break; 1206 break;
999 } 1207 }
1000 1208
1209 spin_lock_irqsave(&phba->hbalock, iflags);
1001 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 1210 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1002 &rspiocbq); 1211 &rspiocbq);
1212 spin_unlock_irqrestore(&phba->hbalock, iflags);
1003 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 1213 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1004 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1214 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1005 &rspiocbq); 1215 &rspiocbq);
@@ -1033,9 +1243,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
1033 * been updated, sync the pgp->rspPutInx and fetch the new port 1243 * been updated, sync the pgp->rspPutInx and fetch the new port
1034 * response put pointer. 1244 * response put pointer.
1035 */ 1245 */
1036 to_slim = phba->MBslimaddr + 1246 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1037 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
1038 writeb(pring->rspidx, to_slim);
1039 1247
1040 if (pring->rspidx == portRspPut) 1248 if (pring->rspidx == portRspPut)
1041 portRspPut = le32_to_cpu(pgp->rspPutInx); 1249 portRspPut = le32_to_cpu(pgp->rspPutInx);
@@ -1045,13 +1253,16 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
1045 ha_copy >>= (LPFC_FCP_RING * 4); 1253 ha_copy >>= (LPFC_FCP_RING * 4);
1046 1254
1047 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { 1255 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
1256 spin_lock_irqsave(&phba->hbalock, iflags);
1048 pring->stats.iocb_rsp_full++; 1257 pring->stats.iocb_rsp_full++;
1049 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); 1258 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1050 writel(status, phba->CAregaddr); 1259 writel(status, phba->CAregaddr);
1051 readl(phba->CAregaddr); 1260 readl(phba->CAregaddr);
1261 spin_unlock_irqrestore(&phba->hbalock, iflags);
1052 } 1262 }
1053 if ((ha_copy & HA_R0CE_RSP) && 1263 if ((ha_copy & HA_R0CE_RSP) &&
1054 (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1264 (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1265 spin_lock_irqsave(&phba->hbalock, iflags);
1055 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1266 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1056 pring->stats.iocb_cmd_empty++; 1267 pring->stats.iocb_cmd_empty++;
1057 1268
@@ -1062,6 +1273,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
1062 if ((pring->lpfc_sli_cmd_available)) 1273 if ((pring->lpfc_sli_cmd_available))
1063 (pring->lpfc_sli_cmd_available) (phba, pring); 1274 (pring->lpfc_sli_cmd_available) (phba, pring);
1064 1275
1276 spin_unlock_irqrestore(&phba->hbalock, iflags);
1065 } 1277 }
1066 1278
1067 return; 1279 return;
@@ -1072,10 +1284,12 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
1072 * to check it explicitly. 1284 * to check it explicitly.
1073 */ 1285 */
1074static int 1286static int
1075lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, 1287lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1076 struct lpfc_sli_ring * pring, uint32_t mask) 1288 struct lpfc_sli_ring *pring, uint32_t mask)
1077{ 1289{
1078 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1290 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1291 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1292 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1079 IOCB_t *irsp = NULL; 1293 IOCB_t *irsp = NULL;
1080 IOCB_t *entry = NULL; 1294 IOCB_t *entry = NULL;
1081 struct lpfc_iocbq *cmdiocbq = NULL; 1295 struct lpfc_iocbq *cmdiocbq = NULL;
@@ -1086,9 +1300,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1086 lpfc_iocb_type type; 1300 lpfc_iocb_type type;
1087 unsigned long iflag; 1301 unsigned long iflag;
1088 uint32_t rsp_cmpl = 0; 1302 uint32_t rsp_cmpl = 0;
1089 void __iomem *to_slim;
1090 1303
1091 spin_lock_irqsave(phba->host->host_lock, iflag); 1304 spin_lock_irqsave(&phba->hbalock, iflag);
1092 pring->stats.iocb_event++; 1305 pring->stats.iocb_event++;
1093 1306
1094 /* 1307 /*
@@ -1099,7 +1312,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1099 portRspPut = le32_to_cpu(pgp->rspPutInx); 1312 portRspPut = le32_to_cpu(pgp->rspPutInx);
1100 if (unlikely(portRspPut >= portRspMax)) { 1313 if (unlikely(portRspPut >= portRspMax)) {
1101 lpfc_sli_rsp_pointers_error(phba, pring); 1314 lpfc_sli_rsp_pointers_error(phba, pring);
1102 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1315 spin_unlock_irqrestore(&phba->hbalock, iflag);
1103 return 1; 1316 return 1;
1104 } 1317 }
1105 1318
@@ -1110,14 +1323,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1110 * structure. The copy involves a byte-swap since the 1323 * structure. The copy involves a byte-swap since the
1111 * network byte order and pci byte orders are different. 1324 * network byte order and pci byte orders are different.
1112 */ 1325 */
1113 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 1326 entry = lpfc_resp_iocb(phba, pring);
1327 phba->last_completion_time = jiffies;
1114 1328
1115 if (++pring->rspidx >= portRspMax) 1329 if (++pring->rspidx >= portRspMax)
1116 pring->rspidx = 0; 1330 pring->rspidx = 0;
1117 1331
1118 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1332 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1119 (uint32_t *) &rspiocbq.iocb, 1333 (uint32_t *) &rspiocbq.iocb,
1120 sizeof (IOCB_t)); 1334 phba->iocb_rsp_size);
1121 INIT_LIST_HEAD(&(rspiocbq.list)); 1335 INIT_LIST_HEAD(&(rspiocbq.list));
1122 irsp = &rspiocbq.iocb; 1336 irsp = &rspiocbq.iocb;
1123 1337
@@ -1126,16 +1340,30 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1126 rsp_cmpl++; 1340 rsp_cmpl++;
1127 1341
1128 if (unlikely(irsp->ulpStatus)) { 1342 if (unlikely(irsp->ulpStatus)) {
1343 /*
1344 * If resource errors are reported from the HBA, reduce
1345 * queue depths of the SCSI device.
1346 */
1347 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1348 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1349 spin_unlock_irqrestore(&phba->hbalock, iflag);
1350 lpfc_adjust_queue_depth(phba);
1351 spin_lock_irqsave(&phba->hbalock, iflag);
1352 }
1353
1129 /* Rsp ring <ringno> error: IOCB */ 1354 /* Rsp ring <ringno> error: IOCB */
1130 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1355 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1131 "%d:0336 Rsp Ring %d error: IOCB Data: " 1356 "%d:0336 Rsp Ring %d error: IOCB Data: "
1132 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1357 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1133 phba->brd_no, pring->ringno, 1358 phba->brd_no, pring->ringno,
1134 irsp->un.ulpWord[0], irsp->un.ulpWord[1], 1359 irsp->un.ulpWord[0],
1135 irsp->un.ulpWord[2], irsp->un.ulpWord[3], 1360 irsp->un.ulpWord[1],
1136 irsp->un.ulpWord[4], irsp->un.ulpWord[5], 1361 irsp->un.ulpWord[2],
1137 *(((uint32_t *) irsp) + 6), 1362 irsp->un.ulpWord[3],
1138 *(((uint32_t *) irsp) + 7)); 1363 irsp->un.ulpWord[4],
1364 irsp->un.ulpWord[5],
1365 *(((uint32_t *) irsp) + 6),
1366 *(((uint32_t *) irsp) + 7));
1139 } 1367 }
1140 1368
1141 switch (type) { 1369 switch (type) {
@@ -1149,7 +1377,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1149 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1377 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1150 "%d:0333 IOCB cmd 0x%x" 1378 "%d:0333 IOCB cmd 0x%x"
1151 " processed. Skipping" 1379 " processed. Skipping"
1152 " completion\n", phba->brd_no, 1380 " completion\n",
1381 phba->brd_no,
1153 irsp->ulpCommand); 1382 irsp->ulpCommand);
1154 break; 1383 break;
1155 } 1384 }
@@ -1161,19 +1390,19 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1161 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1390 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1162 &rspiocbq); 1391 &rspiocbq);
1163 } else { 1392 } else {
1164 spin_unlock_irqrestore( 1393 spin_unlock_irqrestore(&phba->hbalock,
1165 phba->host->host_lock, iflag); 1394 iflag);
1166 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1395 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1167 &rspiocbq); 1396 &rspiocbq);
1168 spin_lock_irqsave(phba->host->host_lock, 1397 spin_lock_irqsave(&phba->hbalock,
1169 iflag); 1398 iflag);
1170 } 1399 }
1171 } 1400 }
1172 break; 1401 break;
1173 case LPFC_UNSOL_IOCB: 1402 case LPFC_UNSOL_IOCB:
1174 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1403 spin_unlock_irqrestore(&phba->hbalock, iflag);
1175 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 1404 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1176 spin_lock_irqsave(phba->host->host_lock, iflag); 1405 spin_lock_irqsave(&phba->hbalock, iflag);
1177 break; 1406 break;
1178 default: 1407 default:
1179 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1408 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
@@ -1186,11 +1415,13 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1186 } else { 1415 } else {
1187 /* Unknown IOCB command */ 1416 /* Unknown IOCB command */
1188 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1417 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1189 "%d:0334 Unknown IOCB command " 1418 "%d:0334 Unknown IOCB command "
1190 "Data: x%x, x%x x%x x%x x%x\n", 1419 "Data: x%x, x%x x%x x%x x%x\n",
1191 phba->brd_no, type, irsp->ulpCommand, 1420 phba->brd_no, type,
1192 irsp->ulpStatus, irsp->ulpIoTag, 1421 irsp->ulpCommand,
1193 irsp->ulpContext); 1422 irsp->ulpStatus,
1423 irsp->ulpIoTag,
1424 irsp->ulpContext);
1194 } 1425 }
1195 break; 1426 break;
1196 } 1427 }
@@ -1201,9 +1432,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1201 * been updated, sync the pgp->rspPutInx and fetch the new port 1432 * been updated, sync the pgp->rspPutInx and fetch the new port
1202 * response put pointer. 1433 * response put pointer.
1203 */ 1434 */
1204 to_slim = phba->MBslimaddr + 1435 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1205 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
1206 writel(pring->rspidx, to_slim);
1207 1436
1208 if (pring->rspidx == portRspPut) 1437 if (pring->rspidx == portRspPut)
1209 portRspPut = le32_to_cpu(pgp->rspPutInx); 1438 portRspPut = le32_to_cpu(pgp->rspPutInx);
@@ -1228,31 +1457,31 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1228 1457
1229 } 1458 }
1230 1459
1231 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1460 spin_unlock_irqrestore(&phba->hbalock, iflag);
1232 return rc; 1461 return rc;
1233} 1462}
1234 1463
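The fast-ring handler above (and the slow-ring handler that follows) now watches completions for IOSTAT_LOCAL_REJECT with IOERR_NO_RESOURCES and calls lpfc_adjust_queue_depth() only after dropping hbalock, since the ramp-down work may need to take other locks. A minimal model of that drop-the-lock-around-a-callout pattern, using a pthread mutex purely for illustration:

#include <pthread.h>

#define STAT_LOCAL_REJECT	3	/* illustrative stand-ins for the    */
#define ERR_NO_RESOURCES	5	/* driver's IOSTAT_/IOERR_ values    */

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models lpfc_adjust_queue_depth(): must not run under ring_lock. */
static void adjust_queue_depth(void) { }

/* Called with ring_lock held for every completed response entry. */
static void note_completion(int status, int reason)
{
	if (status == STAT_LOCAL_REJECT && reason == ERR_NO_RESOURCES) {
		pthread_mutex_unlock(&ring_lock);
		adjust_queue_depth();
		pthread_mutex_lock(&ring_lock);
	}
	/* ... remaining processing continues under the lock ... */
}
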
1235
1236int 1464int
1237lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, 1465lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1238 struct lpfc_sli_ring * pring, uint32_t mask) 1466 struct lpfc_sli_ring *pring, uint32_t mask)
1239{ 1467{
1468 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1469 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1470 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1240 IOCB_t *entry; 1471 IOCB_t *entry;
1241 IOCB_t *irsp = NULL; 1472 IOCB_t *irsp = NULL;
1242 struct lpfc_iocbq *rspiocbp = NULL; 1473 struct lpfc_iocbq *rspiocbp = NULL;
1243 struct lpfc_iocbq *next_iocb; 1474 struct lpfc_iocbq *next_iocb;
1244 struct lpfc_iocbq *cmdiocbp; 1475 struct lpfc_iocbq *cmdiocbp;
1245 struct lpfc_iocbq *saveq; 1476 struct lpfc_iocbq *saveq;
1246 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
1247 uint8_t iocb_cmd_type; 1477 uint8_t iocb_cmd_type;
1248 lpfc_iocb_type type; 1478 lpfc_iocb_type type;
1249 uint32_t status, free_saveq; 1479 uint32_t status, free_saveq;
1250 uint32_t portRspPut, portRspMax; 1480 uint32_t portRspPut, portRspMax;
1251 int rc = 1; 1481 int rc = 1;
1252 unsigned long iflag; 1482 unsigned long iflag;
1253 void __iomem *to_slim;
1254 1483
1255 spin_lock_irqsave(phba->host->host_lock, iflag); 1484 spin_lock_irqsave(&phba->hbalock, iflag);
1256 pring->stats.iocb_event++; 1485 pring->stats.iocb_event++;
1257 1486
1258 /* 1487 /*
@@ -1266,16 +1495,14 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1266 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then 1495 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
1267 * rsp ring <portRspMax> 1496 * rsp ring <portRspMax>
1268 */ 1497 */
1269 lpfc_printf_log(phba, 1498 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1270 KERN_ERR,
1271 LOG_SLI,
1272 "%d:0303 Ring %d handler: portRspPut %d " 1499 "%d:0303 Ring %d handler: portRspPut %d "
1273 "is bigger then rsp ring %d\n", 1500 "is bigger then rsp ring %d\n",
1274 phba->brd_no, 1501 phba->brd_no, pring->ringno, portRspPut,
1275 pring->ringno, portRspPut, portRspMax); 1502 portRspMax);
1276 1503
1277 phba->hba_state = LPFC_HBA_ERROR; 1504 phba->link_state = LPFC_HBA_ERROR;
1278 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1505 spin_unlock_irqrestore(&phba->hbalock, iflag);
1279 1506
1280 phba->work_hs = HS_FFER3; 1507 phba->work_hs = HS_FFER3;
1281 lpfc_handle_eratt(phba); 1508 lpfc_handle_eratt(phba);
@@ -1298,23 +1525,24 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1298 * the ulpLe field is set, the entire Command has been 1525 * the ulpLe field is set, the entire Command has been
1299 * received. 1526 * received.
1300 */ 1527 */
1301 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 1528 entry = lpfc_resp_iocb(phba, pring);
1302 rspiocbp = lpfc_sli_get_iocbq(phba); 1529
1530 phba->last_completion_time = jiffies;
1531 rspiocbp = __lpfc_sli_get_iocbq(phba);
1303 if (rspiocbp == NULL) { 1532 if (rspiocbp == NULL) {
1304 printk(KERN_ERR "%s: out of buffers! Failing " 1533 printk(KERN_ERR "%s: out of buffers! Failing "
1305 "completion.\n", __FUNCTION__); 1534 "completion.\n", __FUNCTION__);
1306 break; 1535 break;
1307 } 1536 }
1308 1537
1309 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t)); 1538 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
1539 phba->iocb_rsp_size);
1310 irsp = &rspiocbp->iocb; 1540 irsp = &rspiocbp->iocb;
1311 1541
1312 if (++pring->rspidx >= portRspMax) 1542 if (++pring->rspidx >= portRspMax)
1313 pring->rspidx = 0; 1543 pring->rspidx = 0;
1314 1544
1315 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2) 1545 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1316 + 1) * 4;
1317 writel(pring->rspidx, to_slim);
1318 1546
1319 if (list_empty(&(pring->iocb_continueq))) { 1547 if (list_empty(&(pring->iocb_continueq))) {
1320 list_add(&rspiocbp->list, &(pring->iocb_continueq)); 1548 list_add(&rspiocbp->list, &(pring->iocb_continueq));
@@ -1338,23 +1566,44 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1338 1566
1339 pring->stats.iocb_rsp++; 1567 pring->stats.iocb_rsp++;
1340 1568
1569 /*
1570 * If resource errors are reported from the HBA, reduce
1571 * queue depths of the SCSI device.
1572 */
1573 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1574 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1575 spin_unlock_irqrestore(&phba->hbalock, iflag);
1576 lpfc_adjust_queue_depth(phba);
1577 spin_lock_irqsave(&phba->hbalock, iflag);
1578 }
1579
1341 if (irsp->ulpStatus) { 1580 if (irsp->ulpStatus) {
1342 /* Rsp ring <ringno> error: IOCB */ 1581 /* Rsp ring <ringno> error: IOCB */
1343 lpfc_printf_log(phba, 1582 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1344 KERN_WARNING, 1583 "%d:0328 Rsp Ring %d error: "
1345 LOG_SLI, 1584 "IOCB Data: "
1346 "%d:0328 Rsp Ring %d error: IOCB Data: " 1585 "x%x x%x x%x x%x "
1347 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1586 "x%x x%x x%x x%x "
1348 phba->brd_no, 1587 "x%x x%x x%x x%x "
1349 pring->ringno, 1588 "x%x x%x x%x x%x\n",
1350 irsp->un.ulpWord[0], 1589 phba->brd_no,
1351 irsp->un.ulpWord[1], 1590 pring->ringno,
1352 irsp->un.ulpWord[2], 1591 irsp->un.ulpWord[0],
1353 irsp->un.ulpWord[3], 1592 irsp->un.ulpWord[1],
1354 irsp->un.ulpWord[4], 1593 irsp->un.ulpWord[2],
1355 irsp->un.ulpWord[5], 1594 irsp->un.ulpWord[3],
1356 *(((uint32_t *) irsp) + 6), 1595 irsp->un.ulpWord[4],
1357 *(((uint32_t *) irsp) + 7)); 1596 irsp->un.ulpWord[5],
1597 *(((uint32_t *) irsp) + 6),
1598 *(((uint32_t *) irsp) + 7),
1599 *(((uint32_t *) irsp) + 8),
1600 *(((uint32_t *) irsp) + 9),
1601 *(((uint32_t *) irsp) + 10),
1602 *(((uint32_t *) irsp) + 11),
1603 *(((uint32_t *) irsp) + 12),
1604 *(((uint32_t *) irsp) + 13),
1605 *(((uint32_t *) irsp) + 14),
1606 *(((uint32_t *) irsp) + 15));
1358 } 1607 }
1359 1608
1360 /* 1609 /*
@@ -1366,17 +1615,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1366 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 1615 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1367 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 1616 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1368 if (type == LPFC_SOL_IOCB) { 1617 if (type == LPFC_SOL_IOCB) {
1369 spin_unlock_irqrestore(phba->host->host_lock, 1618 spin_unlock_irqrestore(&phba->hbalock,
1370 iflag); 1619 iflag);
1371 rc = lpfc_sli_process_sol_iocb(phba, pring, 1620 rc = lpfc_sli_process_sol_iocb(phba, pring,
1372 saveq); 1621 saveq);
1373 spin_lock_irqsave(phba->host->host_lock, iflag); 1622 spin_lock_irqsave(&phba->hbalock, iflag);
1374 } else if (type == LPFC_UNSOL_IOCB) { 1623 } else if (type == LPFC_UNSOL_IOCB) {
1375 spin_unlock_irqrestore(phba->host->host_lock, 1624 spin_unlock_irqrestore(&phba->hbalock,
1376 iflag); 1625 iflag);
1377 rc = lpfc_sli_process_unsol_iocb(phba, pring, 1626 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1378 saveq); 1627 saveq);
1379 spin_lock_irqsave(phba->host->host_lock, iflag); 1628 spin_lock_irqsave(&phba->hbalock, iflag);
1380 } else if (type == LPFC_ABORT_IOCB) { 1629 } else if (type == LPFC_ABORT_IOCB) {
1381 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && 1630 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1382 ((cmdiocbp = 1631 ((cmdiocbp =
@@ -1386,15 +1635,15 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1386 routine */ 1635 routine */
1387 if (cmdiocbp->iocb_cmpl) { 1636 if (cmdiocbp->iocb_cmpl) {
1388 spin_unlock_irqrestore( 1637 spin_unlock_irqrestore(
1389 phba->host->host_lock, 1638 &phba->hbalock,
1390 iflag); 1639 iflag);
1391 (cmdiocbp->iocb_cmpl) (phba, 1640 (cmdiocbp->iocb_cmpl) (phba,
1392 cmdiocbp, saveq); 1641 cmdiocbp, saveq);
1393 spin_lock_irqsave( 1642 spin_lock_irqsave(
1394 phba->host->host_lock, 1643 &phba->hbalock,
1395 iflag); 1644 iflag);
1396 } else 1645 } else
1397 lpfc_sli_release_iocbq(phba, 1646 __lpfc_sli_release_iocbq(phba,
1398 cmdiocbp); 1647 cmdiocbp);
1399 } 1648 }
1400 } else if (type == LPFC_UNKNOWN_IOCB) { 1649 } else if (type == LPFC_UNKNOWN_IOCB) {
@@ -1411,32 +1660,28 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1411 phba->brd_no, adaptermsg); 1660 phba->brd_no, adaptermsg);
1412 } else { 1661 } else {
1413 /* Unknown IOCB command */ 1662 /* Unknown IOCB command */
1414 lpfc_printf_log(phba, 1663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1415 KERN_ERR, 1664 "%d:0335 Unknown IOCB "
1416 LOG_SLI, 1665 "command Data: x%x "
1417 "%d:0335 Unknown IOCB command " 1666 "x%x x%x x%x\n",
1418 "Data: x%x x%x x%x x%x\n", 1667 phba->brd_no,
1419 phba->brd_no, 1668 irsp->ulpCommand,
1420 irsp->ulpCommand, 1669 irsp->ulpStatus,
1421 irsp->ulpStatus, 1670 irsp->ulpIoTag,
1422 irsp->ulpIoTag, 1671 irsp->ulpContext);
1423 irsp->ulpContext);
1424 } 1672 }
1425 } 1673 }
1426 1674
1427 if (free_saveq) { 1675 if (free_saveq) {
1428 if (!list_empty(&saveq->list)) { 1676 list_for_each_entry_safe(rspiocbp, next_iocb,
1429 list_for_each_entry_safe(rspiocbp, 1677 &saveq->list, list) {
1430 next_iocb, 1678 list_del(&rspiocbp->list);
1431 &saveq->list, 1679 __lpfc_sli_release_iocbq(phba,
1432 list) { 1680 rspiocbp);
1433 list_del(&rspiocbp->list);
1434 lpfc_sli_release_iocbq(phba,
1435 rspiocbp);
1436 }
1437 } 1681 }
1438 lpfc_sli_release_iocbq(phba, saveq); 1682 __lpfc_sli_release_iocbq(phba, saveq);
1439 } 1683 }
1684 rspiocbp = NULL;
1440 } 1685 }
1441 1686
1442 /* 1687 /*
@@ -1449,7 +1694,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1449 } 1694 }
1450 } /* while (pring->rspidx != portRspPut) */ 1695 } /* while (pring->rspidx != portRspPut) */
1451 1696
1452 if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) { 1697 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1453 /* At least one response entry has been freed */ 1698 /* At least one response entry has been freed */
1454 pring->stats.iocb_rsp_full++; 1699 pring->stats.iocb_rsp_full++;
1455 /* SET RxRE_RSP in Chip Att register */ 1700 /* SET RxRE_RSP in Chip Att register */
@@ -1470,24 +1715,25 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1470 1715
1471 } 1716 }
1472 1717
1473 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1718 spin_unlock_irqrestore(&phba->hbalock, iflag);
1474 return rc; 1719 return rc;
1475} 1720}
1476 1721
1477int 1722void
1478lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1723lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1479{ 1724{
1480 LIST_HEAD(completions); 1725 LIST_HEAD(completions);
1481 struct lpfc_iocbq *iocb, *next_iocb; 1726 struct lpfc_iocbq *iocb, *next_iocb;
1482 IOCB_t *cmd = NULL; 1727 IOCB_t *cmd = NULL;
1483 int errcnt;
1484 1728
1485 errcnt = 0; 1729 if (pring->ringno == LPFC_ELS_RING) {
1730 lpfc_fabric_abort_hba(phba);
1731 }
1486 1732
1487 /* Error everything on txq and txcmplq 1733 /* Error everything on txq and txcmplq
1488 * First do the txq. 1734 * First do the txq.
1489 */ 1735 */
1490 spin_lock_irq(phba->host->host_lock); 1736 spin_lock_irq(&phba->hbalock);
1491 list_splice_init(&pring->txq, &completions); 1737 list_splice_init(&pring->txq, &completions);
1492 pring->txq_cnt = 0; 1738 pring->txq_cnt = 0;
1493 1739
@@ -1495,26 +1741,25 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1495 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 1741 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1496 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 1742 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1497 1743
1498 spin_unlock_irq(phba->host->host_lock); 1744 spin_unlock_irq(&phba->hbalock);
1499 1745
1500 while (!list_empty(&completions)) { 1746 while (!list_empty(&completions)) {
1501 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 1747 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1502 cmd = &iocb->iocb; 1748 cmd = &iocb->iocb;
1503 list_del(&iocb->list); 1749 list_del_init(&iocb->list);
1504 1750
1505 if (iocb->iocb_cmpl) { 1751 if (!iocb->iocb_cmpl)
1752 lpfc_sli_release_iocbq(phba, iocb);
1753 else {
1506 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 1754 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1507 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 1755 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1508 (iocb->iocb_cmpl) (phba, iocb, iocb); 1756 (iocb->iocb_cmpl) (phba, iocb, iocb);
1509 } else 1757 }
1510 lpfc_sli_release_iocbq(phba, iocb);
1511 } 1758 }
1512
1513 return errcnt;
1514} 1759}
1515 1760
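lpfc_sli_abort_iocb_ring above first splices the pending txq onto a private list while holding hbalock and only afterwards walks that list, so every iocb_cmpl callback is invoked with the lock released. A small self-contained model of that splice-then-complete idiom, with a plain singly linked list standing in for the kernel list API:

#include <pthread.h>
#include <stddef.h>

struct req {
	struct req *next;
	int         status;
	void      (*done)(struct req *);
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *txq;			/* pending requests, protected by q_lock */

static void flush_txq(int abort_status)
{
	struct req *list, *r;

	pthread_mutex_lock(&q_lock);
	list = txq;			/* "splice": steal the whole queue at once */
	txq = NULL;
	pthread_mutex_unlock(&q_lock);

	while ((r = list) != NULL) {	/* complete everything without the lock */
		list = r->next;
		r->status = abort_status;
		if (r->done)
			r->done(r);
	}
}
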
1516int 1761int
1517lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) 1762lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1518{ 1763{
1519 uint32_t status; 1764 uint32_t status;
1520 int i = 0; 1765 int i = 0;
@@ -1541,7 +1786,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1541 msleep(2500); 1786 msleep(2500);
1542 1787
1543 if (i == 15) { 1788 if (i == 15) {
1544 phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */ 1789 /* Do post */
1790 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1545 lpfc_sli_brdrestart(phba); 1791 lpfc_sli_brdrestart(phba);
1546 } 1792 }
1547 /* Read the HBA Host Status Register */ 1793 /* Read the HBA Host Status Register */
@@ -1550,7 +1796,7 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1550 1796
1551 /* Check to see if any errors occurred during init */ 1797 /* Check to see if any errors occurred during init */
1552 if ((status & HS_FFERM) || (i >= 20)) { 1798 if ((status & HS_FFERM) || (i >= 20)) {
1553 phba->hba_state = LPFC_HBA_ERROR; 1799 phba->link_state = LPFC_HBA_ERROR;
1554 retval = 1; 1800 retval = 1;
1555 } 1801 }
1556 1802
@@ -1559,7 +1805,7 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1559 1805
1560#define BARRIER_TEST_PATTERN (0xdeadbeef) 1806#define BARRIER_TEST_PATTERN (0xdeadbeef)
1561 1807
1562void lpfc_reset_barrier(struct lpfc_hba * phba) 1808void lpfc_reset_barrier(struct lpfc_hba *phba)
1563{ 1809{
1564 uint32_t __iomem *resp_buf; 1810 uint32_t __iomem *resp_buf;
1565 uint32_t __iomem *mbox_buf; 1811 uint32_t __iomem *mbox_buf;
@@ -1584,12 +1830,12 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1584 hc_copy = readl(phba->HCregaddr); 1830 hc_copy = readl(phba->HCregaddr);
1585 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 1831 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1586 readl(phba->HCregaddr); /* flush */ 1832 readl(phba->HCregaddr); /* flush */
1587 phba->fc_flag |= FC_IGNORE_ERATT; 1833 phba->link_flag |= LS_IGNORE_ERATT;
1588 1834
1589 if (readl(phba->HAregaddr) & HA_ERATT) { 1835 if (readl(phba->HAregaddr) & HA_ERATT) {
1590 /* Clear Chip error bit */ 1836 /* Clear Chip error bit */
1591 writel(HA_ERATT, phba->HAregaddr); 1837 writel(HA_ERATT, phba->HAregaddr);
1592 phba->stopped = 1; 1838 phba->pport->stopped = 1;
1593 } 1839 }
1594 1840
1595 mbox = 0; 1841 mbox = 0;
@@ -1606,7 +1852,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1606 1852
1607 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 1853 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1608 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 1854 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
1609 phba->stopped) 1855 phba->pport->stopped)
1610 goto restore_hc; 1856 goto restore_hc;
1611 else 1857 else
1612 goto clear_errat; 1858 goto clear_errat;
@@ -1623,17 +1869,17 @@ clear_errat:
1623 1869
1624 if (readl(phba->HAregaddr) & HA_ERATT) { 1870 if (readl(phba->HAregaddr) & HA_ERATT) {
1625 writel(HA_ERATT, phba->HAregaddr); 1871 writel(HA_ERATT, phba->HAregaddr);
1626 phba->stopped = 1; 1872 phba->pport->stopped = 1;
1627 } 1873 }
1628 1874
1629restore_hc: 1875restore_hc:
1630 phba->fc_flag &= ~FC_IGNORE_ERATT; 1876 phba->link_flag &= ~LS_IGNORE_ERATT;
1631 writel(hc_copy, phba->HCregaddr); 1877 writel(hc_copy, phba->HCregaddr);
1632 readl(phba->HCregaddr); /* flush */ 1878 readl(phba->HCregaddr); /* flush */
1633} 1879}
1634 1880
1635int 1881int
1636lpfc_sli_brdkill(struct lpfc_hba * phba) 1882lpfc_sli_brdkill(struct lpfc_hba *phba)
1637{ 1883{
1638 struct lpfc_sli *psli; 1884 struct lpfc_sli *psli;
1639 LPFC_MBOXQ_t *pmb; 1885 LPFC_MBOXQ_t *pmb;
@@ -1645,26 +1891,22 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
1645 psli = &phba->sli; 1891 psli = &phba->sli;
1646 1892
1647 /* Kill HBA */ 1893 /* Kill HBA */
1648 lpfc_printf_log(phba, 1894 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1649 KERN_INFO, 1895 "%d:0329 Kill HBA Data: x%x x%x\n",
1650 LOG_SLI, 1896 phba->brd_no, phba->pport->port_state, psli->sli_flag);
1651 "%d:0329 Kill HBA Data: x%x x%x\n",
1652 phba->brd_no,
1653 phba->hba_state,
1654 psli->sli_flag);
1655 1897
1656 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 1898 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
1657 GFP_KERNEL)) == 0) 1899 GFP_KERNEL)) == 0)
1658 return 1; 1900 return 1;
1659 1901
1660 /* Disable the error attention */ 1902 /* Disable the error attention */
1661 spin_lock_irq(phba->host->host_lock); 1903 spin_lock_irq(&phba->hbalock);
1662 status = readl(phba->HCregaddr); 1904 status = readl(phba->HCregaddr);
1663 status &= ~HC_ERINT_ENA; 1905 status &= ~HC_ERINT_ENA;
1664 writel(status, phba->HCregaddr); 1906 writel(status, phba->HCregaddr);
1665 readl(phba->HCregaddr); /* flush */ 1907 readl(phba->HCregaddr); /* flush */
1666 phba->fc_flag |= FC_IGNORE_ERATT; 1908 phba->link_flag |= LS_IGNORE_ERATT;
1667 spin_unlock_irq(phba->host->host_lock); 1909 spin_unlock_irq(&phba->hbalock);
1668 1910
1669 lpfc_kill_board(phba, pmb); 1911 lpfc_kill_board(phba, pmb);
1670 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1912 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -1673,9 +1915,9 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
1673 if (retval != MBX_SUCCESS) { 1915 if (retval != MBX_SUCCESS) {
1674 if (retval != MBX_BUSY) 1916 if (retval != MBX_BUSY)
1675 mempool_free(pmb, phba->mbox_mem_pool); 1917 mempool_free(pmb, phba->mbox_mem_pool);
1676 spin_lock_irq(phba->host->host_lock); 1918 spin_lock_irq(&phba->hbalock);
1677 phba->fc_flag &= ~FC_IGNORE_ERATT; 1919 phba->link_flag &= ~LS_IGNORE_ERATT;
1678 spin_unlock_irq(phba->host->host_lock); 1920 spin_unlock_irq(&phba->hbalock);
1679 return 1; 1921 return 1;
1680 } 1922 }
1681 1923
@@ -1698,22 +1940,22 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
1698 del_timer_sync(&psli->mbox_tmo); 1940 del_timer_sync(&psli->mbox_tmo);
1699 if (ha_copy & HA_ERATT) { 1941 if (ha_copy & HA_ERATT) {
1700 writel(HA_ERATT, phba->HAregaddr); 1942 writel(HA_ERATT, phba->HAregaddr);
1701 phba->stopped = 1; 1943 phba->pport->stopped = 1;
1702 } 1944 }
1703 spin_lock_irq(phba->host->host_lock); 1945 spin_lock_irq(&phba->hbalock);
1704 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1946 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1705 phba->fc_flag &= ~FC_IGNORE_ERATT; 1947 phba->link_flag &= ~LS_IGNORE_ERATT;
1706 spin_unlock_irq(phba->host->host_lock); 1948 spin_unlock_irq(&phba->hbalock);
1707 1949
1708 psli->mbox_active = NULL; 1950 psli->mbox_active = NULL;
1709 lpfc_hba_down_post(phba); 1951 lpfc_hba_down_post(phba);
1710 phba->hba_state = LPFC_HBA_ERROR; 1952 phba->link_state = LPFC_HBA_ERROR;
1711 1953
1712 return (ha_copy & HA_ERATT ? 0 : 1); 1954 return ha_copy & HA_ERATT ? 0 : 1;
1713} 1955}
1714 1956
1715int 1957int
1716lpfc_sli_brdreset(struct lpfc_hba * phba) 1958lpfc_sli_brdreset(struct lpfc_hba *phba)
1717{ 1959{
1718 struct lpfc_sli *psli; 1960 struct lpfc_sli *psli;
1719 struct lpfc_sli_ring *pring; 1961 struct lpfc_sli_ring *pring;
@@ -1725,12 +1967,12 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
1725 /* Reset HBA */ 1967 /* Reset HBA */
1726 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1968 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1727 "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no, 1969 "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
1728 phba->hba_state, psli->sli_flag); 1970 phba->pport->port_state, psli->sli_flag);
1729 1971
1730 /* perform board reset */ 1972 /* perform board reset */
1731 phba->fc_eventTag = 0; 1973 phba->fc_eventTag = 0;
1732 phba->fc_myDID = 0; 1974 phba->pport->fc_myDID = 0;
1733 phba->fc_prevDID = 0; 1975 phba->pport->fc_prevDID = 0;
1734 1976
1735 /* Turn off parity checking and serr during the physical reset */ 1977 /* Turn off parity checking and serr during the physical reset */
1736 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 1978 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
@@ -1760,12 +2002,12 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
1760 pring->missbufcnt = 0; 2002 pring->missbufcnt = 0;
1761 } 2003 }
1762 2004
1763 phba->hba_state = LPFC_WARM_START; 2005 phba->link_state = LPFC_WARM_START;
1764 return 0; 2006 return 0;
1765} 2007}
1766 2008
1767int 2009int
1768lpfc_sli_brdrestart(struct lpfc_hba * phba) 2010lpfc_sli_brdrestart(struct lpfc_hba *phba)
1769{ 2011{
1770 MAILBOX_t *mb; 2012 MAILBOX_t *mb;
1771 struct lpfc_sli *psli; 2013 struct lpfc_sli *psli;
@@ -1773,14 +2015,14 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1773 volatile uint32_t word0; 2015 volatile uint32_t word0;
1774 void __iomem *to_slim; 2016 void __iomem *to_slim;
1775 2017
1776 spin_lock_irq(phba->host->host_lock); 2018 spin_lock_irq(&phba->hbalock);
1777 2019
1778 psli = &phba->sli; 2020 psli = &phba->sli;
1779 2021
1780 /* Restart HBA */ 2022 /* Restart HBA */
1781 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2023 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1782 "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, 2024 "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
1783 phba->hba_state, psli->sli_flag); 2025 phba->pport->port_state, psli->sli_flag);
1784 2026
1785 word0 = 0; 2027 word0 = 0;
1786 mb = (MAILBOX_t *) &word0; 2028 mb = (MAILBOX_t *) &word0;
@@ -1794,7 +2036,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1794 readl(to_slim); /* flush */ 2036 readl(to_slim); /* flush */
1795 2037
1796 /* Only skip post after fc_ffinit is completed */ 2038 /* Only skip post after fc_ffinit is completed */
1797 if (phba->hba_state) { 2039 if (phba->pport->port_state) {
1798 skip_post = 1; 2040 skip_post = 1;
1799 word0 = 1; /* This is really setting up word1 */ 2041 word0 = 1; /* This is really setting up word1 */
1800 } else { 2042 } else {
@@ -1806,10 +2048,10 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1806 readl(to_slim); /* flush */ 2048 readl(to_slim); /* flush */
1807 2049
1808 lpfc_sli_brdreset(phba); 2050 lpfc_sli_brdreset(phba);
1809 phba->stopped = 0; 2051 phba->pport->stopped = 0;
1810 phba->hba_state = LPFC_INIT_START; 2052 phba->link_state = LPFC_INIT_START;
1811 2053
1812 spin_unlock_irq(phba->host->host_lock); 2054 spin_unlock_irq(&phba->hbalock);
1813 2055
1814 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 2056 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
1815 psli->stats_start = get_seconds(); 2057 psli->stats_start = get_seconds();
@@ -1843,14 +2085,11 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
1843 if (i++ >= 20) { 2085 if (i++ >= 20) {
1844 /* Adapter failed to init, timeout, status reg 2086 /* Adapter failed to init, timeout, status reg
1845 <status> */ 2087 <status> */
1846 lpfc_printf_log(phba, 2088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1847 KERN_ERR,
1848 LOG_INIT,
1849 "%d:0436 Adapter failed to init, " 2089 "%d:0436 Adapter failed to init, "
1850 "timeout, status reg x%x\n", 2090 "timeout, status reg x%x\n",
1851 phba->brd_no, 2091 phba->brd_no, status);
1852 status); 2092 phba->link_state = LPFC_HBA_ERROR;
1853 phba->hba_state = LPFC_HBA_ERROR;
1854 return -ETIMEDOUT; 2093 return -ETIMEDOUT;
1855 } 2094 }
1856 2095
@@ -1859,14 +2098,12 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
1859 /* ERROR: During chipset initialization */ 2098 /* ERROR: During chipset initialization */
1860 /* Adapter failed to init, chipset, status reg 2099 /* Adapter failed to init, chipset, status reg
1861 <status> */ 2100 <status> */
1862 lpfc_printf_log(phba, 2101 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1863 KERN_ERR,
1864 LOG_INIT,
1865 "%d:0437 Adapter failed to init, " 2102 "%d:0437 Adapter failed to init, "
1866 "chipset, status reg x%x\n", 2103 "chipset, status reg x%x\n",
1867 phba->brd_no, 2104 phba->brd_no,
1868 status); 2105 status);
1869 phba->hba_state = LPFC_HBA_ERROR; 2106 phba->link_state = LPFC_HBA_ERROR;
1870 return -EIO; 2107 return -EIO;
1871 } 2108 }
1872 2109
@@ -1879,7 +2116,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
1879 } 2116 }
1880 2117
1881 if (i == 15) { 2118 if (i == 15) {
1882 phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */ 2119 /* Do post */
2120 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1883 lpfc_sli_brdrestart(phba); 2121 lpfc_sli_brdrestart(phba);
1884 } 2122 }
1885 /* Read the HBA Host Status Register */ 2123 /* Read the HBA Host Status Register */
@@ -1890,14 +2128,12 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
1890 if (status & HS_FFERM) { 2128 if (status & HS_FFERM) {
1891 /* ERROR: During chipset initialization */ 2129 /* ERROR: During chipset initialization */
1892 /* Adapter failed to init, chipset, status reg <status> */ 2130 /* Adapter failed to init, chipset, status reg <status> */
1893 lpfc_printf_log(phba, 2131 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1894 KERN_ERR,
1895 LOG_INIT,
1896 "%d:0438 Adapter failed to init, chipset, " 2132 "%d:0438 Adapter failed to init, chipset, "
1897 "status reg x%x\n", 2133 "status reg x%x\n",
1898 phba->brd_no, 2134 phba->brd_no,
1899 status); 2135 status);
1900 phba->hba_state = LPFC_HBA_ERROR; 2136 phba->link_state = LPFC_HBA_ERROR;
1901 return -EIO; 2137 return -EIO;
1902 } 2138 }
1903 2139
@@ -1911,80 +2147,253 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
1911 return 0; 2147 return 0;
1912} 2148}
1913 2149
2150static int
2151lpfc_sli_hbq_count(void)
2152{
2153 return ARRAY_SIZE(lpfc_hbq_defs);
2154}
2155
2156static int
2157lpfc_sli_hbq_entry_count(void)
2158{
2159 int hbq_count = lpfc_sli_hbq_count();
2160 int count = 0;
2161 int i;
2162
2163 for (i = 0; i < hbq_count; ++i)
2164 count += lpfc_hbq_defs[i]->entry_count;
2165 return count;
2166}
2167
1914int 2168int
1915lpfc_sli_hba_setup(struct lpfc_hba * phba) 2169lpfc_sli_hbq_size(void)
2170{
2171 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
2172}
2173
2174static int
2175lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2176{
2177 int hbq_count = lpfc_sli_hbq_count();
2178 LPFC_MBOXQ_t *pmb;
2179 MAILBOX_t *pmbox;
2180 uint32_t hbqno;
2181 uint32_t hbq_entry_index;
2182
2183 /* Get a Mailbox buffer to setup mailbox
2184 * commands for HBA initialization
2185 */
2186 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2187
2188 if (!pmb)
2189 return -ENOMEM;
2190
2191 pmbox = &pmb->mb;
2192
2193 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2194 phba->link_state = LPFC_INIT_MBX_CMDS;
2195
2196 hbq_entry_index = 0;
2197 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2198 phba->hbqs[hbqno].next_hbqPutIdx = 0;
2199 phba->hbqs[hbqno].hbqPutIdx = 0;
2200 phba->hbqs[hbqno].local_hbqGetIdx = 0;
2201 phba->hbqs[hbqno].entry_count =
2202 lpfc_hbq_defs[hbqno]->entry_count;
2203 lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
2204 pmb);
2205 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2206
2207 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2208 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2209 mbxStatus <status>, ring <num> */
2210
2211 lpfc_printf_log(phba, KERN_ERR,
2212 LOG_SLI | LOG_VPORT,
2213 "%d:1805 Adapter failed to init. "
2214 "Data: x%x x%x x%x\n",
2215 phba->brd_no, pmbox->mbxCommand,
2216 pmbox->mbxStatus, hbqno);
2217
2218 phba->link_state = LPFC_HBA_ERROR;
2219 mempool_free(pmb, phba->mbox_mem_pool);
2220 return ENXIO;
2221 }
2222 }
2223 phba->hbq_count = hbq_count;
2224
2225 mempool_free(pmb, phba->mbox_mem_pool);
2226
2227 /* Initially populate or replenish the HBQs */
2228 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2229 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2230 return -ENOMEM;
2231 }
2232 return 0;
2233}
2234
2235static int
2236lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
1916{ 2237{
1917 LPFC_MBOXQ_t *pmb; 2238 LPFC_MBOXQ_t *pmb;
1918 uint32_t resetcount = 0, rc = 0, done = 0; 2239 uint32_t resetcount = 0, rc = 0, done = 0;
1919 2240
1920 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2241 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1921 if (!pmb) { 2242 if (!pmb) {
1922 phba->hba_state = LPFC_HBA_ERROR; 2243 phba->link_state = LPFC_HBA_ERROR;
1923 return -ENOMEM; 2244 return -ENOMEM;
1924 } 2245 }
1925 2246
2247 phba->sli_rev = sli_mode;
1926 while (resetcount < 2 && !done) { 2248 while (resetcount < 2 && !done) {
1927 spin_lock_irq(phba->host->host_lock); 2249 spin_lock_irq(&phba->hbalock);
1928 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2250 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
1929 spin_unlock_irq(phba->host->host_lock); 2251 spin_unlock_irq(&phba->hbalock);
1930 phba->hba_state = LPFC_STATE_UNKNOWN; 2252 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1931 lpfc_sli_brdrestart(phba); 2253 lpfc_sli_brdrestart(phba);
1932 msleep(2500); 2254 msleep(2500);
1933 rc = lpfc_sli_chipset_init(phba); 2255 rc = lpfc_sli_chipset_init(phba);
1934 if (rc) 2256 if (rc)
1935 break; 2257 break;
1936 2258
1937 spin_lock_irq(phba->host->host_lock); 2259 spin_lock_irq(&phba->hbalock);
1938 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2260 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1939 spin_unlock_irq(phba->host->host_lock); 2261 spin_unlock_irq(&phba->hbalock);
1940 resetcount++; 2262 resetcount++;
1941 2263
1942 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 2264 /* Call pre CONFIG_PORT mailbox command initialization. A
1943 * means the call was successful. Any other nonzero value is a failure, 2265 * value of 0 means the call was successful. Any other
1944 * but if ERESTART is returned, the driver may reset the HBA and try 2266 * nonzero value is a failure, but if ERESTART is returned,
1945 * again. 2267 * the driver may reset the HBA and try again.
1946 */ 2268 */
1947 rc = lpfc_config_port_prep(phba); 2269 rc = lpfc_config_port_prep(phba);
1948 if (rc == -ERESTART) { 2270 if (rc == -ERESTART) {
1949 phba->hba_state = 0; 2271 phba->link_state = LPFC_LINK_UNKNOWN;
1950 continue; 2272 continue;
1951 } else if (rc) { 2273 } else if (rc) {
1952 break; 2274 break;
1953 } 2275 }
1954 2276
1955 phba->hba_state = LPFC_INIT_MBX_CMDS; 2277 phba->link_state = LPFC_INIT_MBX_CMDS;
1956 lpfc_config_port(phba, pmb); 2278 lpfc_config_port(phba, pmb);
1957 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 2279 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1958 if (rc == MBX_SUCCESS) 2280 if (rc != MBX_SUCCESS) {
1959 done = 1;
1960 else {
1961 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1962 "%d:0442 Adapter failed to init, mbxCmd x%x " 2282 "%d:0442 Adapter failed to init, mbxCmd x%x "
1963 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 2283 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
1964 phba->brd_no, pmb->mb.mbxCommand, 2284 phba->brd_no, pmb->mb.mbxCommand,
1965 pmb->mb.mbxStatus, 0); 2285 pmb->mb.mbxStatus, 0);
2286 spin_lock_irq(&phba->hbalock);
1966 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 2287 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2288 spin_unlock_irq(&phba->hbalock);
2289 rc = -ENXIO;
2290 } else {
2291 done = 1;
2292 phba->max_vpi = (phba->max_vpi &&
2293 pmb->mb.un.varCfgPort.gmv) != 0
2294 ? pmb->mb.un.varCfgPort.max_vpi
2295 : 0;
2296 }
2297 }
2298
2299 if (!done) {
2300 rc = -EINVAL;
2301 goto do_prep_failed;
2302 }
2303
2304 if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2305 (!pmb->mb.un.varCfgPort.cMA)) {
2306 rc = -ENXIO;
2307 goto do_prep_failed;
2308 }
2309 return rc;
2310
2311do_prep_failed:
2312 mempool_free(pmb, phba->mbox_mem_pool);
2313 return rc;
2314}
2315
2316int
2317lpfc_sli_hba_setup(struct lpfc_hba *phba)
2318{
2319 uint32_t rc;
2320 int mode = 3;
2321
2322 switch (lpfc_sli_mode) {
2323 case 2:
2324 if (phba->cfg_npiv_enable) {
2325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2326 "%d:1824 NPIV enabled: Override lpfc_sli_mode "
2327 "parameter (%d) to auto (0).\n",
2328 phba->brd_no, lpfc_sli_mode);
2329 break;
1967 } 2330 }
2331 mode = 2;
2332 break;
2333 case 0:
2334 case 3:
2335 break;
2336 default:
2337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2338 "%d:1819 Unrecognized lpfc_sli_mode "
2339 "parameter: %d.\n",
2340 phba->brd_no, lpfc_sli_mode);
2341
2342 break;
1968 } 2343 }
1969 if (!done) 2344
2345 rc = lpfc_do_config_port(phba, mode);
2346 if (rc && lpfc_sli_mode == 3)
2347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2348 "%d:1820 Unable to select SLI-3. "
2349 "Not supported by adapter.\n",
2350 phba->brd_no);
2351 if (rc && mode != 2)
2352 rc = lpfc_do_config_port(phba, 2);
2353 if (rc)
1970 goto lpfc_sli_hba_setup_error; 2354 goto lpfc_sli_hba_setup_error;
1971 2355
1972 rc = lpfc_sli_ring_map(phba, pmb); 2356 if (phba->sli_rev == 3) {
2357 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2358 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2359 phba->sli3_options |= LPFC_SLI3_ENABLED;
2360 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2361
2362 } else {
2363 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2364 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2365 phba->sli3_options = 0;
2366 }
2367
2368 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2369 "%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
2370 phba->brd_no, phba->sli_rev, phba->max_vpi);
2371 rc = lpfc_sli_ring_map(phba);
1973 2372
1974 if (rc) 2373 if (rc)
1975 goto lpfc_sli_hba_setup_error; 2374 goto lpfc_sli_hba_setup_error;
1976 2375
2376 /* Init HBQs */
2377
2378 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2379 rc = lpfc_sli_hbq_setup(phba);
2380 if (rc)
2381 goto lpfc_sli_hba_setup_error;
2382 }
2383
1977 phba->sli.sli_flag |= LPFC_PROCESS_LA; 2384 phba->sli.sli_flag |= LPFC_PROCESS_LA;
1978 2385
1979 rc = lpfc_config_port_post(phba); 2386 rc = lpfc_config_port_post(phba);
1980 if (rc) 2387 if (rc)
1981 goto lpfc_sli_hba_setup_error; 2388 goto lpfc_sli_hba_setup_error;
1982 2389
1983 goto lpfc_sli_hba_setup_exit; 2390 return rc;
2391
1984lpfc_sli_hba_setup_error: 2392lpfc_sli_hba_setup_error:
1985 phba->hba_state = LPFC_HBA_ERROR; 2393 phba->link_state = LPFC_HBA_ERROR;
1986lpfc_sli_hba_setup_exit: 2394 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1987 mempool_free(pmb, phba->mbox_mem_pool); 2395 "%d:0445 Firmware initialization failed\n",
2396 phba->brd_no);
1988 return rc; 2397 return rc;
1989} 2398}
1990 2399
@@ -2004,56 +2413,58 @@ lpfc_sli_hba_setup_exit:
2004void 2413void
2005lpfc_mbox_timeout(unsigned long ptr) 2414lpfc_mbox_timeout(unsigned long ptr)
2006{ 2415{
2007 struct lpfc_hba *phba; 2416 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2008 unsigned long iflag; 2417 unsigned long iflag;
2418 uint32_t tmo_posted;
2009 2419
2010 phba = (struct lpfc_hba *)ptr; 2420 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2011 spin_lock_irqsave(phba->host->host_lock, iflag); 2421 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2012 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) { 2422 if (!tmo_posted)
2013 phba->work_hba_events |= WORKER_MBOX_TMO; 2423 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2424 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2425
2426 if (!tmo_posted) {
2427 spin_lock_irqsave(&phba->hbalock, iflag);
2014 if (phba->work_wait) 2428 if (phba->work_wait)
2015 wake_up(phba->work_wait); 2429 lpfc_worker_wake_up(phba);
2430 spin_unlock_irqrestore(&phba->hbalock, iflag);
2016 } 2431 }
2017 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2018} 2432}
2019 2433
2020void 2434void
2021lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 2435lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2022{ 2436{
2023 LPFC_MBOXQ_t *pmbox; 2437 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2024 MAILBOX_t *mb; 2438 MAILBOX_t *mb = &pmbox->mb;
2025 struct lpfc_sli *psli = &phba->sli; 2439 struct lpfc_sli *psli = &phba->sli;
2026 struct lpfc_sli_ring *pring; 2440 struct lpfc_sli_ring *pring;
2027 2441
2028 spin_lock_irq(phba->host->host_lock); 2442 if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
2029 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
2030 spin_unlock_irq(phba->host->host_lock);
2031 return; 2443 return;
2032 } 2444 }
2033 2445
2034 pmbox = phba->sli.mbox_active;
2035 mb = &pmbox->mb;
2036
2037 /* Mbox cmd <mbxCommand> timeout */ 2446 /* Mbox cmd <mbxCommand> timeout */
2038 lpfc_printf_log(phba, 2447 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2039 KERN_ERR, 2448 "%d:0310 Mailbox command x%x timeout Data: x%x x%x "
2040 LOG_MBOX | LOG_SLI, 2449 "x%p\n",
2041 "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 2450 phba->brd_no,
2042 phba->brd_no, 2451 mb->mbxCommand,
2043 mb->mbxCommand, 2452 phba->pport->port_state,
2044 phba->hba_state, 2453 phba->sli.sli_flag,
2045 phba->sli.sli_flag, 2454 phba->sli.mbox_active);
2046 phba->sli.mbox_active);
2047 2455
2048 /* Setting state unknown so lpfc_sli_abort_iocb_ring 2456 /* Setting state unknown so lpfc_sli_abort_iocb_ring
2049 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 2457 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2050	 * it to fail all outstanding SCSI IO. 2458	 * it to fail all outstanding SCSI IO.
2051 */ 2459 */
2052 phba->hba_state = LPFC_STATE_UNKNOWN; 2460 spin_lock_irq(&phba->pport->work_port_lock);
2053 phba->work_hba_events &= ~WORKER_MBOX_TMO; 2461 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2054 phba->fc_flag |= FC_ESTABLISH_LINK; 2462 spin_unlock_irq(&phba->pport->work_port_lock);
2463 spin_lock_irq(&phba->hbalock);
2464 phba->link_state = LPFC_LINK_UNKNOWN;
2465 phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2055 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2466 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2056 spin_unlock_irq(phba->host->host_lock); 2467 spin_unlock_irq(&phba->hbalock);
2057 2468
2058 pring = &psli->ring[psli->fcp_ring]; 2469 pring = &psli->ring[psli->fcp_ring];
2059 lpfc_sli_abort_iocb_ring(phba, pring); 2470 lpfc_sli_abort_iocb_ring(phba, pring);
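
The lpfc_mbox_timeout() change above keeps the timer callback minimal: it only records WORKER_MBOX_TMO under the port's work lock and wakes the worker thread if the event was not already posted; all real recovery happens later in lpfc_mbox_timeout_handler(). A hedged sketch of that post-once-and-wake pattern, with a plain mutex and flag standing in for the driver's structures:

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	#define WORKER_MBOX_TMO 0x1

	struct port {
		pthread_mutex_t work_port_lock;
		uint32_t work_port_events;
	};

	static void worker_wake_up(void) { printf("worker woken\n"); }

	static void mbox_timeout(struct port *p)
	{
		uint32_t tmo_posted;

		pthread_mutex_lock(&p->work_port_lock);
		tmo_posted = p->work_port_events & WORKER_MBOX_TMO;
		if (!tmo_posted)
			p->work_port_events |= WORKER_MBOX_TMO;
		pthread_mutex_unlock(&p->work_port_lock);

		if (!tmo_posted)		/* wake only on the first posting */
			worker_wake_up();
	}

	int main(void)
	{
		struct port p = { PTHREAD_MUTEX_INITIALIZER, 0 };

		mbox_timeout(&p);	/* wakes the worker */
		mbox_timeout(&p);	/* already posted: no second wake-up */
		return 0;
	}
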
@@ -2075,10 +2486,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2075} 2486}
2076 2487
2077int 2488int
2078lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) 2489lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2079{ 2490{
2080 MAILBOX_t *mb; 2491 MAILBOX_t *mb;
2081 struct lpfc_sli *psli; 2492 struct lpfc_sli *psli = &phba->sli;
2082 uint32_t status, evtctr; 2493 uint32_t status, evtctr;
2083 uint32_t ha_copy; 2494 uint32_t ha_copy;
2084 int i; 2495 int i;
@@ -2086,31 +2497,44 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2086 volatile uint32_t word0, ldata; 2497 volatile uint32_t word0, ldata;
2087 void __iomem *to_slim; 2498 void __iomem *to_slim;
2088 2499
2500 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2501 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2502 if(!pmbox->vport) {
2503 lpfc_printf_log(phba, KERN_ERR,
2504 LOG_MBOX | LOG_VPORT,
2505 "%d:1806 Mbox x%x failed. No vport\n",
2506 phba->brd_no,
2507 pmbox->mb.mbxCommand);
2508 dump_stack();
2509 return MBXERR_ERROR;
2510 }
2511 }
2512
2513
2089 /* If the PCI channel is in offline state, do not post mbox. */ 2514 /* If the PCI channel is in offline state, do not post mbox. */
2090 if (unlikely(pci_channel_offline(phba->pcidev))) 2515 if (unlikely(pci_channel_offline(phba->pcidev)))
2091 return MBX_NOT_FINISHED; 2516 return MBX_NOT_FINISHED;
2092 2517
2518 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2093 psli = &phba->sli; 2519 psli = &phba->sli;
2094 2520
2095 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
2096
2097 2521
2098 mb = &pmbox->mb; 2522 mb = &pmbox->mb;
2099 status = MBX_SUCCESS; 2523 status = MBX_SUCCESS;
2100 2524
2101 if (phba->hba_state == LPFC_HBA_ERROR) { 2525 if (phba->link_state == LPFC_HBA_ERROR) {
2102 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2526 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2103 2527
2104 /* Mbox command <mbxCommand> cannot issue */ 2528 /* Mbox command <mbxCommand> cannot issue */
2105 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2529 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2106 return (MBX_NOT_FINISHED); 2530 return MBX_NOT_FINISHED;
2107 } 2531 }
2108 2532
2109 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2533 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2110 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2534 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2111 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2535 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2112 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2536 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2113 return (MBX_NOT_FINISHED); 2537 return MBX_NOT_FINISHED;
2114 } 2538 }
2115 2539
2116 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 2540 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -2120,20 +2544,18 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2120 */ 2544 */
2121 2545
2122 if (flag & MBX_POLL) { 2546 if (flag & MBX_POLL) {
2123 spin_unlock_irqrestore(phba->host->host_lock, 2547 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2124 drvr_flag);
2125 2548
2126 /* Mbox command <mbxCommand> cannot issue */ 2549 /* Mbox command <mbxCommand> cannot issue */
2127 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2550 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2128 return (MBX_NOT_FINISHED); 2551 return MBX_NOT_FINISHED;
2129 } 2552 }
2130 2553
2131 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 2554 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2132 spin_unlock_irqrestore(phba->host->host_lock, 2555 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2133 drvr_flag);
2134 /* Mbox command <mbxCommand> cannot issue */ 2556 /* Mbox command <mbxCommand> cannot issue */
2135 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2557 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2136 return (MBX_NOT_FINISHED); 2558 return MBX_NOT_FINISHED;
2137 } 2559 }
2138 2560
2139 /* Handle STOP IOCB processing flag. This is only meaningful 2561 /* Handle STOP IOCB processing flag. This is only meaningful
@@ -2157,21 +2579,33 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2157 lpfc_mbox_put(phba, pmbox); 2579 lpfc_mbox_put(phba, pmbox);
2158 2580
2159 /* Mbox cmd issue - BUSY */ 2581 /* Mbox cmd issue - BUSY */
2160 lpfc_printf_log(phba, 2582 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2161 KERN_INFO, 2583 "%d (%d):0308 Mbox cmd issue - BUSY Data: "
2162 LOG_MBOX | LOG_SLI, 2584 "x%x x%x x%x x%x\n",
2163 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n", 2585 phba->brd_no,
2164 phba->brd_no, 2586 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2165 mb->mbxCommand, 2587 mb->mbxCommand, phba->pport->port_state,
2166 phba->hba_state, 2588 psli->sli_flag, flag);
2167 psli->sli_flag,
2168 flag);
2169 2589
2170 psli->slistat.mbox_busy++; 2590 psli->slistat.mbox_busy++;
2171 spin_unlock_irqrestore(phba->host->host_lock, 2591 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2172 drvr_flag); 2592
2593 if (pmbox->vport) {
2594 lpfc_debugfs_disc_trc(pmbox->vport,
2595 LPFC_DISC_TRC_MBOX_VPORT,
2596 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
2597 (uint32_t)mb->mbxCommand,
2598 mb->un.varWords[0], mb->un.varWords[1]);
2599 }
2600 else {
2601 lpfc_debugfs_disc_trc(phba->pport,
2602 LPFC_DISC_TRC_MBOX,
2603 "MBOX Bsy: cmd:x%x mb:x%x x%x",
2604 (uint32_t)mb->mbxCommand,
2605 mb->un.varWords[0], mb->un.varWords[1]);
2606 }
2173 2607
2174 return (MBX_BUSY); 2608 return MBX_BUSY;
2175 } 2609 }
2176 2610
2177 /* Handle STOP IOCB processing flag. This is only meaningful 2611 /* Handle STOP IOCB processing flag. This is only meaningful
@@ -2198,11 +2632,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2198 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 2632 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2199 (mb->mbxCommand != MBX_KILL_BOARD)) { 2633 (mb->mbxCommand != MBX_KILL_BOARD)) {
2200 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2634 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2201 spin_unlock_irqrestore(phba->host->host_lock, 2635 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2202 drvr_flag);
2203 /* Mbox command <mbxCommand> cannot issue */ 2636 /* Mbox command <mbxCommand> cannot issue */
2204 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag); 2637 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2205 return (MBX_NOT_FINISHED); 2638 return MBX_NOT_FINISHED;
2206 } 2639 }
2207 /* timeout active mbox command */ 2640 /* timeout active mbox command */
2208 mod_timer(&psli->mbox_tmo, (jiffies + 2641 mod_timer(&psli->mbox_tmo, (jiffies +
@@ -2210,15 +2643,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2210 } 2643 }
2211 2644
2212 /* Mailbox cmd <cmd> issue */ 2645 /* Mailbox cmd <cmd> issue */
2213 lpfc_printf_log(phba, 2646 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2214 KERN_INFO, 2647 "%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
2215 LOG_MBOX | LOG_SLI, 2648 "x%x\n",
2216 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n", 2649 phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
2217 phba->brd_no, 2650 mb->mbxCommand, phba->pport->port_state,
2218 mb->mbxCommand, 2651 psli->sli_flag, flag);
2219 phba->hba_state, 2652
2220 psli->sli_flag, 2653 if (mb->mbxCommand != MBX_HEARTBEAT) {
2221 flag); 2654 if (pmbox->vport) {
2655 lpfc_debugfs_disc_trc(pmbox->vport,
2656 LPFC_DISC_TRC_MBOX_VPORT,
2657 "MBOX Send vport: cmd:x%x mb:x%x x%x",
2658 (uint32_t)mb->mbxCommand,
2659 mb->un.varWords[0], mb->un.varWords[1]);
2660 }
2661 else {
2662 lpfc_debugfs_disc_trc(phba->pport,
2663 LPFC_DISC_TRC_MBOX,
2664 "MBOX Send: cmd:x%x mb:x%x x%x",
2665 (uint32_t)mb->mbxCommand,
2666 mb->un.varWords[0], mb->un.varWords[1]);
2667 }
2668 }
2222 2669
2223 psli->slistat.mbox_cmd++; 2670 psli->slistat.mbox_cmd++;
2224 evtctr = psli->slistat.mbox_event; 2671 evtctr = psli->slistat.mbox_event;
@@ -2233,7 +2680,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2233 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2680 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2234 /* copy command data into host mbox for cmpl */ 2681 /* copy command data into host mbox for cmpl */
2235 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 2682 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2236 MAILBOX_CMD_SIZE); 2683 MAILBOX_CMD_SIZE);
2237 } 2684 }
2238 2685
2239 /* First copy mbox command data to HBA SLIM, skip past first 2686 /* First copy mbox command data to HBA SLIM, skip past first
@@ -2285,12 +2732,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2285 /* Wait for command to complete */ 2732 /* Wait for command to complete */
2286 while (((word0 & OWN_CHIP) == OWN_CHIP) || 2733 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2287 (!(ha_copy & HA_MBATT) && 2734 (!(ha_copy & HA_MBATT) &&
2288 (phba->hba_state > LPFC_WARM_START))) { 2735 (phba->link_state > LPFC_WARM_START))) {
2289 if (i-- <= 0) { 2736 if (i-- <= 0) {
2290 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2737 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2291 spin_unlock_irqrestore(phba->host->host_lock, 2738 spin_unlock_irqrestore(&phba->hbalock,
2292 drvr_flag); 2739 drvr_flag);
2293 return (MBX_NOT_FINISHED); 2740 return MBX_NOT_FINISHED;
2294 } 2741 }
2295 2742
2296 /* Check if we took a mbox interrupt while we were 2743 /* Check if we took a mbox interrupt while we were
@@ -2299,12 +2746,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2299 && (evtctr != psli->slistat.mbox_event)) 2746 && (evtctr != psli->slistat.mbox_event))
2300 break; 2747 break;
2301 2748
2302 spin_unlock_irqrestore(phba->host->host_lock, 2749 spin_unlock_irqrestore(&phba->hbalock,
2303 drvr_flag); 2750 drvr_flag);
2304 2751
2305 msleep(1); 2752 msleep(1);
2306 2753
2307 spin_lock_irqsave(phba->host->host_lock, drvr_flag); 2754 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2308 2755
2309 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2756 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2310 /* First copy command data */ 2757 /* First copy command data */
@@ -2335,7 +2782,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2335 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2782 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2336 /* copy results back to user */ 2783 /* copy results back to user */
2337 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, 2784 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2338 MAILBOX_CMD_SIZE); 2785 MAILBOX_CMD_SIZE);
2339 } else { 2786 } else {
2340 /* First copy command data */ 2787 /* First copy command data */
2341 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 2788 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
@@ -2355,23 +2802,25 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2355 status = mb->mbxStatus; 2802 status = mb->mbxStatus;
2356 } 2803 }
2357 2804
2358 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2805 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2359 return (status); 2806 return status;
2360} 2807}
2361 2808
2362static int 2809/*
2363lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 2810 * Caller needs to hold lock.
2364 struct lpfc_iocbq * piocb) 2811 */
2812static void
2813__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2814 struct lpfc_iocbq *piocb)
2365{ 2815{
2366 /* Insert the caller's iocb in the txq tail for later processing. */ 2816 /* Insert the caller's iocb in the txq tail for later processing. */
2367 list_add_tail(&piocb->list, &pring->txq); 2817 list_add_tail(&piocb->list, &pring->txq);
2368 pring->txq_cnt++; 2818 pring->txq_cnt++;
2369 return (0);
2370} 2819}
2371 2820
2372static struct lpfc_iocbq * 2821static struct lpfc_iocbq *
2373lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2822lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2374 struct lpfc_iocbq ** piocb) 2823 struct lpfc_iocbq **piocb)
2375{ 2824{
2376 struct lpfc_iocbq * nextiocb; 2825 struct lpfc_iocbq * nextiocb;
2377 2826
@@ -2384,13 +2833,29 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2384 return nextiocb; 2833 return nextiocb;
2385} 2834}
2386 2835
2836/*
2837 * Lockless version of lpfc_sli_issue_iocb.
2838 */
2387int 2839int
2388lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2840__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2389 struct lpfc_iocbq *piocb, uint32_t flag) 2841 struct lpfc_iocbq *piocb, uint32_t flag)
2390{ 2842{
2391 struct lpfc_iocbq *nextiocb; 2843 struct lpfc_iocbq *nextiocb;
2392 IOCB_t *iocb; 2844 IOCB_t *iocb;
2393 2845
2846 if (piocb->iocb_cmpl && (!piocb->vport) &&
2847 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2848 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2849 lpfc_printf_log(phba, KERN_ERR,
2850 LOG_SLI | LOG_VPORT,
2851 "%d:1807 IOCB x%x failed. No vport\n",
2852 phba->brd_no,
2853 piocb->iocb.ulpCommand);
2854 dump_stack();
2855 return IOCB_ERROR;
2856 }
2857
2858
2394 /* If the PCI channel is in offline state, do not post iocbs. */ 2859 /* If the PCI channel is in offline state, do not post iocbs. */
2395 if (unlikely(pci_channel_offline(phba->pcidev))) 2860 if (unlikely(pci_channel_offline(phba->pcidev)))
2396 return IOCB_ERROR; 2861 return IOCB_ERROR;
@@ -2398,7 +2863,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2398 /* 2863 /*
2399 * We should never get an IOCB if we are in a < LINK_DOWN state 2864 * We should never get an IOCB if we are in a < LINK_DOWN state
2400 */ 2865 */
2401 if (unlikely(phba->hba_state < LPFC_LINK_DOWN)) 2866 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
2402 return IOCB_ERROR; 2867 return IOCB_ERROR;
2403 2868
2404 /* 2869 /*
@@ -2408,7 +2873,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2408 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX)) 2873 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2409 goto iocb_busy; 2874 goto iocb_busy;
2410 2875
2411 if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) { 2876 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
2412 /* 2877 /*
2413 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 2878 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
2414 * can be issued if the link is not up. 2879 * can be issued if the link is not up.
@@ -2436,8 +2901,9 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2436 * attention events. 2901 * attention events.
2437 */ 2902 */
2438 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 2903 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2439 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) 2904 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
2440 goto iocb_busy; 2905 goto iocb_busy;
2906 }
2441 2907
2442 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 2908 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2443 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 2909 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
@@ -2459,13 +2925,28 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2459 out_busy: 2925 out_busy:
2460 2926
2461 if (!(flag & SLI_IOCB_RET_IOCB)) { 2927 if (!(flag & SLI_IOCB_RET_IOCB)) {
2462 lpfc_sli_ringtx_put(phba, pring, piocb); 2928 __lpfc_sli_ringtx_put(phba, pring, piocb);
2463 return IOCB_SUCCESS; 2929 return IOCB_SUCCESS;
2464 } 2930 }
2465 2931
2466 return IOCB_BUSY; 2932 return IOCB_BUSY;
2467} 2933}
2468 2934
2935
2936int
2937lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2938 struct lpfc_iocbq *piocb, uint32_t flag)
2939{
2940 unsigned long iflags;
2941 int rc;
2942
2943 spin_lock_irqsave(&phba->hbalock, iflags);
2944 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2945 spin_unlock_irqrestore(&phba->hbalock, iflags);
2946
2947 return rc;
2948}
2949
2469static int 2950static int
2470lpfc_extra_ring_setup( struct lpfc_hba *phba) 2951lpfc_extra_ring_setup( struct lpfc_hba *phba)
2471{ 2952{
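
The hunk above introduces the double-underscore convention used throughout this patch: __lpfc_sli_issue_iocb() assumes the caller already holds hbalock, while lpfc_sli_issue_iocb() is the self-locking wrapper. A hedged sketch of that split, with a pthread mutex standing in for hbalock and made-up function names:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

	static int __issue_iocb(int tag)		/* caller must hold hbalock */
	{
		printf("issuing iocb %d with lock held\n", tag);
		return 0;
	}

	static int issue_iocb(int tag)			/* takes and releases hbalock */
	{
		int rc;

		pthread_mutex_lock(&hbalock);
		rc = __issue_iocb(tag);
		pthread_mutex_unlock(&hbalock);
		return rc;
	}

	int main(void)
	{
		return issue_iocb(42);
	}
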
@@ -2504,7 +2985,7 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
2504int 2985int
2505lpfc_sli_setup(struct lpfc_hba *phba) 2986lpfc_sli_setup(struct lpfc_hba *phba)
2506{ 2987{
2507 int i, totiocb = 0; 2988 int i, totiocbsize = 0;
2508 struct lpfc_sli *psli = &phba->sli; 2989 struct lpfc_sli *psli = &phba->sli;
2509 struct lpfc_sli_ring *pring; 2990 struct lpfc_sli_ring *pring;
2510 2991
@@ -2529,6 +3010,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2529 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3010 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2530 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3011 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2531 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3012 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
3013 pring->sizeCiocb = (phba->sli_rev == 3) ?
3014 SLI3_IOCB_CMD_SIZE :
3015 SLI2_IOCB_CMD_SIZE;
3016 pring->sizeRiocb = (phba->sli_rev == 3) ?
3017 SLI3_IOCB_RSP_SIZE :
3018 SLI2_IOCB_RSP_SIZE;
2532 pring->iotag_ctr = 0; 3019 pring->iotag_ctr = 0;
2533 pring->iotag_max = 3020 pring->iotag_max =
2534 (phba->cfg_hba_queue_depth * 2); 3021 (phba->cfg_hba_queue_depth * 2);
@@ -2539,12 +3026,25 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2539 /* numCiocb and numRiocb are used in config_port */ 3026 /* numCiocb and numRiocb are used in config_port */
2540 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 3027 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2541 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 3028 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
3029 pring->sizeCiocb = (phba->sli_rev == 3) ?
3030 SLI3_IOCB_CMD_SIZE :
3031 SLI2_IOCB_CMD_SIZE;
3032 pring->sizeRiocb = (phba->sli_rev == 3) ?
3033 SLI3_IOCB_RSP_SIZE :
3034 SLI2_IOCB_RSP_SIZE;
3035 pring->iotag_max = phba->cfg_hba_queue_depth;
2542 pring->num_mask = 0; 3036 pring->num_mask = 0;
2543 break; 3037 break;
2544 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 3038 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
2545 /* numCiocb and numRiocb are used in config_port */ 3039 /* numCiocb and numRiocb are used in config_port */
2546 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 3040 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2547 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 3041 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
3042 pring->sizeCiocb = (phba->sli_rev == 3) ?
3043 SLI3_IOCB_CMD_SIZE :
3044 SLI2_IOCB_CMD_SIZE;
3045 pring->sizeRiocb = (phba->sli_rev == 3) ?
3046 SLI3_IOCB_RSP_SIZE :
3047 SLI2_IOCB_RSP_SIZE;
2548 pring->fast_iotag = 0; 3048 pring->fast_iotag = 0;
2549 pring->iotag_ctr = 0; 3049 pring->iotag_ctr = 0;
2550 pring->iotag_max = 4096; 3050 pring->iotag_max = 4096;
@@ -2575,14 +3075,16 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2575 lpfc_ct_unsol_event; 3075 lpfc_ct_unsol_event;
2576 break; 3076 break;
2577 } 3077 }
2578 totiocb += (pring->numCiocb + pring->numRiocb); 3078 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
3079 (pring->numRiocb * pring->sizeRiocb);
2579 } 3080 }
2580 if (totiocb > MAX_SLI2_IOCB) { 3081 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
2581 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 3082 /* Too many cmd / rsp ring entries in SLI2 SLIM */
2582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2583 "%d:0462 Too many cmd / rsp ring entries in " 3084 "%d:0462 Too many cmd / rsp ring entries in "
2584 "SLI2 SLIM Data: x%x x%x\n", 3085 "SLI2 SLIM Data: x%x x%lx\n",
2585 phba->brd_no, totiocb, MAX_SLI2_IOCB); 3086 phba->brd_no, totiocbsize,
3087 (unsigned long) MAX_SLIM_IOCB_SIZE);
2586 } 3088 }
2587 if (phba->cfg_multi_ring_support == 2) 3089 if (phba->cfg_multi_ring_support == 2)
2588 lpfc_extra_ring_setup(phba); 3090 lpfc_extra_ring_setup(phba);
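
The lpfc_sli_setup() change above switches the SLIM overflow check from counting IOCB entries to counting bytes, because SLI-3 command/response IOCBs are larger than their SLI-2 counterparts. A hedged sketch of that accounting with made-up entry counts and sizes standing in for the SLI2_*/SLI3_* constants and MAX_SLIM_IOCB_SIZE:

	#include <stdio.h>

	struct ring_cfg {
		int num_ciocb, size_ciocb;	/* command entries and per-entry size */
		int num_riocb, size_riocb;	/* response entries and per-entry size */
	};

	int main(void)
	{
		/* Illustrative numbers only -- not the driver's SLI2/SLI3 constants. */
		const struct ring_cfg rings[] = {
			{ .num_ciocb = 128, .size_ciocb = 128, .num_riocb = 128, .size_riocb = 64 },
			{ .num_ciocb =  32, .size_ciocb = 128, .num_riocb =  32, .size_riocb = 64 },
			{ .num_ciocb =  16, .size_ciocb = 128, .num_riocb =  16, .size_riocb = 64 },
		};
		const long max_slim_iocb_size = 32 * 1024;	/* pretend SLIM budget */
		long totiocbsize = 0;

		for (unsigned i = 0; i < sizeof(rings) / sizeof(rings[0]); i++)
			totiocbsize += (long)rings[i].num_ciocb * rings[i].size_ciocb +
				       (long)rings[i].num_riocb * rings[i].size_riocb;

		if (totiocbsize > max_slim_iocb_size)
			printf("too many cmd/rsp ring entries: %ld > %ld\n",
			       totiocbsize, max_slim_iocb_size);
		else
			printf("ring entries fit: %ld <= %ld\n",
			       totiocbsize, max_slim_iocb_size);
		return 0;
	}
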
@@ -2591,15 +3093,16 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2591} 3093}
2592 3094
2593int 3095int
2594lpfc_sli_queue_setup(struct lpfc_hba * phba) 3096lpfc_sli_queue_setup(struct lpfc_hba *phba)
2595{ 3097{
2596 struct lpfc_sli *psli; 3098 struct lpfc_sli *psli;
2597 struct lpfc_sli_ring *pring; 3099 struct lpfc_sli_ring *pring;
2598 int i; 3100 int i;
2599 3101
2600 psli = &phba->sli; 3102 psli = &phba->sli;
2601 spin_lock_irq(phba->host->host_lock); 3103 spin_lock_irq(&phba->hbalock);
2602 INIT_LIST_HEAD(&psli->mboxq); 3104 INIT_LIST_HEAD(&psli->mboxq);
3105 INIT_LIST_HEAD(&psli->mboxq_cmpl);
2603 /* Initialize list headers for txq and txcmplq as double linked lists */ 3106 /* Initialize list headers for txq and txcmplq as double linked lists */
2604 for (i = 0; i < psli->num_rings; i++) { 3107 for (i = 0; i < psli->num_rings; i++) {
2605 pring = &psli->ring[i]; 3108 pring = &psli->ring[i];
@@ -2612,15 +3115,73 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba)
2612 INIT_LIST_HEAD(&pring->iocb_continueq); 3115 INIT_LIST_HEAD(&pring->iocb_continueq);
2613 INIT_LIST_HEAD(&pring->postbufq); 3116 INIT_LIST_HEAD(&pring->postbufq);
2614 } 3117 }
2615 spin_unlock_irq(phba->host->host_lock); 3118 spin_unlock_irq(&phba->hbalock);
2616 return (1); 3119 return 1;
2617} 3120}
2618 3121
2619int 3122int
2620lpfc_sli_hba_down(struct lpfc_hba * phba) 3123lpfc_sli_host_down(struct lpfc_vport *vport)
2621{ 3124{
2622 LIST_HEAD(completions); 3125 LIST_HEAD(completions);
2623 struct lpfc_sli *psli; 3126 struct lpfc_hba *phba = vport->phba;
3127 struct lpfc_sli *psli = &phba->sli;
3128 struct lpfc_sli_ring *pring;
3129 struct lpfc_iocbq *iocb, *next_iocb;
3130 int i;
3131 unsigned long flags = 0;
3132 uint16_t prev_pring_flag;
3133
3134 lpfc_cleanup_discovery_resources(vport);
3135
3136 spin_lock_irqsave(&phba->hbalock, flags);
3137 for (i = 0; i < psli->num_rings; i++) {
3138 pring = &psli->ring[i];
3139 prev_pring_flag = pring->flag;
3140 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3141 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3142 /*
3143 * Error everything on the txq since these iocbs have not been
3144 * given to the FW yet.
3145 */
3146 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3147 if (iocb->vport != vport)
3148 continue;
3149 list_move_tail(&iocb->list, &completions);
3150 pring->txq_cnt--;
3151 }
3152
3153 /* Next issue ABTS for everything on the txcmplq */
3154 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3155 list) {
3156 if (iocb->vport != vport)
3157 continue;
3158 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3159 }
3160
3161 pring->flag = prev_pring_flag;
3162 }
3163
3164 spin_unlock_irqrestore(&phba->hbalock, flags);
3165
3166 while (!list_empty(&completions)) {
3167 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3168
3169 if (!iocb->iocb_cmpl)
3170 lpfc_sli_release_iocbq(phba, iocb);
3171 else {
3172 iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3173 iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
3174 (iocb->iocb_cmpl) (phba, iocb, iocb);
3175 }
3176 }
3177 return 1;
3178}
3179
3180int
3181lpfc_sli_hba_down(struct lpfc_hba *phba)
3182{
3183 LIST_HEAD(completions);
3184 struct lpfc_sli *psli = &phba->sli;
2624 struct lpfc_sli_ring *pring; 3185 struct lpfc_sli_ring *pring;
2625 LPFC_MBOXQ_t *pmb; 3186 LPFC_MBOXQ_t *pmb;
2626 struct lpfc_iocbq *iocb; 3187 struct lpfc_iocbq *iocb;
@@ -2628,13 +3189,15 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2628 int i; 3189 int i;
2629 unsigned long flags = 0; 3190 unsigned long flags = 0;
2630 3191
2631 psli = &phba->sli;
2632 lpfc_hba_down_prep(phba); 3192 lpfc_hba_down_prep(phba);
2633 3193
2634 spin_lock_irqsave(phba->host->host_lock, flags); 3194 lpfc_fabric_abort_hba(phba);
3195
3196 spin_lock_irqsave(&phba->hbalock, flags);
2635 for (i = 0; i < psli->num_rings; i++) { 3197 for (i = 0; i < psli->num_rings; i++) {
2636 pring = &psli->ring[i]; 3198 pring = &psli->ring[i];
2637 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3199 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3200 pring->flag |= LPFC_DEFERRED_RING_EVENT;
2638 3201
2639 /* 3202 /*
2640 * Error everything on the txq since these iocbs have not been 3203 * Error everything on the txq since these iocbs have not been
@@ -2644,51 +3207,50 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2644 pring->txq_cnt = 0; 3207 pring->txq_cnt = 0;
2645 3208
2646 } 3209 }
2647 spin_unlock_irqrestore(phba->host->host_lock, flags); 3210 spin_unlock_irqrestore(&phba->hbalock, flags);
2648 3211
2649 while (!list_empty(&completions)) { 3212 while (!list_empty(&completions)) {
2650 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 3213 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
2651 cmd = &iocb->iocb; 3214 cmd = &iocb->iocb;
2652 list_del(&iocb->list);
2653 3215
2654 if (iocb->iocb_cmpl) { 3216 if (!iocb->iocb_cmpl)
3217 lpfc_sli_release_iocbq(phba, iocb);
3218 else {
2655 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 3219 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2656 cmd->un.ulpWord[4] = IOERR_SLI_DOWN; 3220 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2657 (iocb->iocb_cmpl) (phba, iocb, iocb); 3221 (iocb->iocb_cmpl) (phba, iocb, iocb);
2658 } else 3222 }
2659 lpfc_sli_release_iocbq(phba, iocb);
2660 } 3223 }
2661 3224
2662 /* Return any active mbox cmds */ 3225 /* Return any active mbox cmds */
2663 del_timer_sync(&psli->mbox_tmo); 3226 del_timer_sync(&psli->mbox_tmo);
2664 spin_lock_irqsave(phba->host->host_lock, flags); 3227 spin_lock_irqsave(&phba->hbalock, flags);
2665 phba->work_hba_events &= ~WORKER_MBOX_TMO; 3228
3229 spin_lock(&phba->pport->work_port_lock);
3230 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3231 spin_unlock(&phba->pport->work_port_lock);
3232
2666 if (psli->mbox_active) { 3233 if (psli->mbox_active) {
2667 pmb = psli->mbox_active; 3234 list_add_tail(&psli->mbox_active->list, &completions);
2668 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 3235 psli->mbox_active = NULL;
2669 if (pmb->mbox_cmpl) { 3236 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2670 spin_unlock_irqrestore(phba->host->host_lock, flags);
2671 pmb->mbox_cmpl(phba,pmb);
2672 spin_lock_irqsave(phba->host->host_lock, flags);
2673 }
2674 } 3237 }
2675 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2676 psli->mbox_active = NULL;
2677 3238
2678 /* Return any pending mbox cmds */ 3239 /* Return any pending or completed mbox cmds */
2679 while ((pmb = lpfc_mbox_get(phba)) != NULL) { 3240 list_splice_init(&phba->sli.mboxq, &completions);
3241 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3242 INIT_LIST_HEAD(&psli->mboxq);
3243 INIT_LIST_HEAD(&psli->mboxq_cmpl);
3244
3245 spin_unlock_irqrestore(&phba->hbalock, flags);
3246
3247 while (!list_empty(&completions)) {
3248 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
2680 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 3249 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2681 if (pmb->mbox_cmpl) { 3250 if (pmb->mbox_cmpl) {
2682 spin_unlock_irqrestore(phba->host->host_lock, flags);
2683 pmb->mbox_cmpl(phba,pmb); 3251 pmb->mbox_cmpl(phba,pmb);
2684 spin_lock_irqsave(phba->host->host_lock, flags);
2685 } 3252 }
2686 } 3253 }
2687
2688 INIT_LIST_HEAD(&psli->mboxq);
2689
2690 spin_unlock_irqrestore(phba->host->host_lock, flags);
2691
2692 return 1; 3254 return 1;
2693} 3255}
2694 3256
@@ -2710,14 +3272,15 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
2710} 3272}
2711 3273
2712int 3274int
2713lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 3275lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2714 struct lpfc_dmabuf * mp) 3276 struct lpfc_dmabuf *mp)
2715{ 3277{
2716 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 3278 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
2717 later */ 3279 later */
3280 spin_lock_irq(&phba->hbalock);
2718 list_add_tail(&mp->list, &pring->postbufq); 3281 list_add_tail(&mp->list, &pring->postbufq);
2719
2720 pring->postbufq_cnt++; 3282 pring->postbufq_cnt++;
3283 spin_unlock_irq(&phba->hbalock);
2721 return 0; 3284 return 0;
2722} 3285}
2723 3286
@@ -2730,14 +3293,17 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2730 struct list_head *slp = &pring->postbufq; 3293 struct list_head *slp = &pring->postbufq;
2731 3294
2732	/* Search postbufq, from the beginning, looking for a match on phys */ 3295
3296 spin_lock_irq(&phba->hbalock);
2733 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 3297 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
2734 if (mp->phys == phys) { 3298 if (mp->phys == phys) {
2735 list_del_init(&mp->list); 3299 list_del_init(&mp->list);
2736 pring->postbufq_cnt--; 3300 pring->postbufq_cnt--;
3301 spin_unlock_irq(&phba->hbalock);
2737 return mp; 3302 return mp;
2738 } 3303 }
2739 } 3304 }
2740 3305
3306 spin_unlock_irq(&phba->hbalock);
2741 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2742 "%d:0410 Cannot find virtual addr for mapped buf on " 3308 "%d:0410 Cannot find virtual addr for mapped buf on "
2743 "ring %d Data x%llx x%p x%p x%x\n", 3309 "ring %d Data x%llx x%p x%p x%x\n",
@@ -2747,92 +3313,110 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2747} 3313}
2748 3314
2749static void 3315static void
2750lpfc_sli_abort_els_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 3316lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2751 struct lpfc_iocbq * rspiocb) 3317 struct lpfc_iocbq *rspiocb)
2752{ 3318{
2753 IOCB_t *irsp; 3319 IOCB_t *irsp = &rspiocb->iocb;
2754 uint16_t abort_iotag, abort_context; 3320 uint16_t abort_iotag, abort_context;
2755 struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb; 3321 struct lpfc_iocbq *abort_iocb;
2756 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3322 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
2757 3323
2758 abort_iocb = NULL; 3324 abort_iocb = NULL;
2759 irsp = &rspiocb->iocb;
2760
2761 spin_lock_irq(phba->host->host_lock);
2762 3325
2763 if (irsp->ulpStatus) { 3326 if (irsp->ulpStatus) {
2764 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 3327 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
2765 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 3328 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
2766 3329
3330 spin_lock_irq(&phba->hbalock);
2767 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) 3331 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
2768 abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; 3332 abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
2769 3333
2770 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3334 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
2771 "%d:0327 Cannot abort els iocb %p" 3335 "%d:0327 Cannot abort els iocb %p "
2772 " with tag %x context %x\n", 3336 "with tag %x context %x, abort status %x, "
2773 phba->brd_no, abort_iocb, 3337 "abort code %x\n",
2774 abort_iotag, abort_context); 3338 phba->brd_no, abort_iocb, abort_iotag,
3339 abort_context, irsp->ulpStatus,
3340 irsp->un.ulpWord[4]);
2775 3341
2776 /* 3342 /*
2777 * make sure we have the right iocbq before taking it 3343 * make sure we have the right iocbq before taking it
2778 * off the txcmplq and try to call completion routine. 3344 * off the txcmplq and try to call completion routine.
2779 */ 3345 */
2780 if (abort_iocb && 3346 if (!abort_iocb ||
2781 abort_iocb->iocb.ulpContext == abort_context && 3347 abort_iocb->iocb.ulpContext != abort_context ||
2782 abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) { 3348 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
2783 list_del(&abort_iocb->list); 3349 spin_unlock_irq(&phba->hbalock);
3350 else {
3351 list_del_init(&abort_iocb->list);
2784 pring->txcmplq_cnt--; 3352 pring->txcmplq_cnt--;
3353 spin_unlock_irq(&phba->hbalock);
2785 3354
2786 rsp_ab_iocb = lpfc_sli_get_iocbq(phba); 3355 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2787 if (rsp_ab_iocb == NULL) 3356 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
2788 lpfc_sli_release_iocbq(phba, abort_iocb); 3357 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
2789 else { 3358 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
2790 abort_iocb->iocb_flag &=
2791 ~LPFC_DRIVER_ABORTED;
2792 rsp_ab_iocb->iocb.ulpStatus =
2793 IOSTAT_LOCAL_REJECT;
2794 rsp_ab_iocb->iocb.un.ulpWord[4] =
2795 IOERR_SLI_ABORTED;
2796 spin_unlock_irq(phba->host->host_lock);
2797 (abort_iocb->iocb_cmpl)
2798 (phba, abort_iocb, rsp_ab_iocb);
2799 spin_lock_irq(phba->host->host_lock);
2800 lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
2801 }
2802 } 3359 }
2803 } 3360 }
2804 3361
2805 lpfc_sli_release_iocbq(phba, cmdiocb); 3362 lpfc_sli_release_iocbq(phba, cmdiocb);
2806 spin_unlock_irq(phba->host->host_lock); 3363 return;
3364}
3365
3366static void
3367lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3368 struct lpfc_iocbq *rspiocb)
3369{
3370 IOCB_t *irsp = &rspiocb->iocb;
3371
3372 /* ELS cmd tag <ulpIoTag> completes */
3373 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3374 "%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
3375 "x%x x%x x%x\n",
3376 phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
3377 irsp->un.ulpWord[4], irsp->ulpTimeout);
3378 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
3379 lpfc_ct_free_iocb(phba, cmdiocb);
3380 else
3381 lpfc_els_free_iocb(phba, cmdiocb);
2807 return; 3382 return;
2808} 3383}
2809 3384
2810int 3385int
2811lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba, 3386lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2812 struct lpfc_sli_ring * pring, 3387 struct lpfc_iocbq *cmdiocb)
2813 struct lpfc_iocbq * cmdiocb)
2814{ 3388{
3389 struct lpfc_vport *vport = cmdiocb->vport;
2815 struct lpfc_iocbq *abtsiocbp; 3390 struct lpfc_iocbq *abtsiocbp;
2816 IOCB_t *icmd = NULL; 3391 IOCB_t *icmd = NULL;
2817 IOCB_t *iabt = NULL; 3392 IOCB_t *iabt = NULL;
2818 int retval = IOCB_ERROR; 3393 int retval = IOCB_ERROR;
2819 3394
2820 /* There are certain command types we don't want 3395 /*
2821 * to abort. 3396 * There are certain command types we don't want to abort. And we
3397 * don't want to abort commands that are already in the process of
3398 * being aborted.
2822 */ 3399 */
2823 icmd = &cmdiocb->iocb; 3400 icmd = &cmdiocb->iocb;
2824 if ((icmd->ulpCommand == CMD_ABORT_XRI_CN) || 3401 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
2825 (icmd->ulpCommand == CMD_CLOSE_XRI_CN)) 3402 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3403 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
2826 return 0; 3404 return 0;
2827 3405
2828 /* If we're unloading, interrupts are disabled so we 3406 /* If we're unloading, don't abort iocb on the ELS ring, but change the
2829 * need to cleanup the iocb here. 3407 * callback so that nothing happens when it finishes.
2830 */ 3408 */
2831 if (phba->fc_flag & FC_UNLOADING) 3409 if ((vport->load_flag & FC_UNLOADING) &&
3410 (pring->ringno == LPFC_ELS_RING)) {
3411 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3412 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3413 else
3414 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
2832 goto abort_iotag_exit; 3415 goto abort_iotag_exit;
3416 }
2833 3417
2834 /* issue ABTS for this IOCB based on iotag */ 3418 /* issue ABTS for this IOCB based on iotag */
2835 abtsiocbp = lpfc_sli_get_iocbq(phba); 3419 abtsiocbp = __lpfc_sli_get_iocbq(phba);
2836 if (abtsiocbp == NULL) 3420 if (abtsiocbp == NULL)
2837 return 0; 3421 return 0;
2838 3422
@@ -2848,7 +3432,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
2848 iabt->ulpLe = 1; 3432 iabt->ulpLe = 1;
2849 iabt->ulpClass = icmd->ulpClass; 3433 iabt->ulpClass = icmd->ulpClass;
2850 3434
2851 if (phba->hba_state >= LPFC_LINK_UP) 3435 if (phba->link_state >= LPFC_LINK_UP)
2852 iabt->ulpCommand = CMD_ABORT_XRI_CN; 3436 iabt->ulpCommand = CMD_ABORT_XRI_CN;
2853 else 3437 else
2854 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 3438 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
@@ -2856,32 +3440,20 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
2856 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 3440 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
2857 3441
2858 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3442 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2859 "%d:0339 Abort xri x%x, original iotag x%x, abort " 3443 "%d (%d):0339 Abort xri x%x, original iotag x%x, "
2860 "cmd iotag x%x\n", 3444 "abort cmd iotag x%x\n",
2861 phba->brd_no, iabt->un.acxri.abortContextTag, 3445 phba->brd_no, vport->vpi,
3446 iabt->un.acxri.abortContextTag,
2862 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 3447 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
2863 retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 3448 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
2864 3449
2865abort_iotag_exit: 3450abort_iotag_exit:
2866 3451 /*
2867 /* If we could not issue an abort dequeue the iocb and handle 3452 * Caller to this routine should check for IOCB_ERROR
2868 * the completion here. 3453 * and handle it properly. This routine no longer removes
3454 * iocb off txcmplq and call compl in case of IOCB_ERROR.
2869 */ 3455 */
2870 if (retval == IOCB_ERROR) { 3456 return retval;
2871 list_del(&cmdiocb->list);
2872 pring->txcmplq_cnt--;
2873
2874 if (cmdiocb->iocb_cmpl) {
2875 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2876 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2877 spin_unlock_irq(phba->host->host_lock);
2878 (cmdiocb->iocb_cmpl) (phba, cmdiocb, cmdiocb);
2879 spin_lock_irq(phba->host->host_lock);
2880 } else
2881 lpfc_sli_release_iocbq(phba, cmdiocb);
2882 }
2883
2884 return 1;
2885} 3457}
2886 3458
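The comment at the abort_iotag_exit label changes the routine's contract: on failure it now simply returns IOCB_ERROR and leaves the original command on the txcmplq, so the caller has to complete or retry it. A short sketch of the caller-side check, using made-up return values that only mirror the codes above:

#include <stdio.h>

#define IOCB_SUCCESS	0
#define IOCB_ERROR	2

/* stand-in for lpfc_sli_issue_abort_iotag(); pretend even iotags fail */
static int issue_abort(int iotag)
{
	return (iotag & 1) ? IOCB_SUCCESS : IOCB_ERROR;
}

int main(void)
{
	for (int iotag = 1; iotag <= 4; iotag++) {
		if (issue_abort(iotag) == IOCB_ERROR)
			/* cleanup now belongs to the caller */
			printf("iotag %d: abort not issued, caller completes the cmd\n",
			       iotag);
		else
			printf("iotag %d: ABTS queued, cmpl handler finishes it\n",
			       iotag);
	}
	return 0;
}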
2887static int 3459static int
@@ -2930,7 +3502,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
2930 3502
2931int 3503int
2932lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3504lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2933 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) 3505 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
2934{ 3506{
2935 struct lpfc_iocbq *iocbq; 3507 struct lpfc_iocbq *iocbq;
2936 int sum, i; 3508 int sum, i;
@@ -2947,14 +3519,10 @@ lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2947} 3519}
2948 3520
2949void 3521void
2950lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 3522lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2951 struct lpfc_iocbq * rspiocb) 3523 struct lpfc_iocbq *rspiocb)
2952{ 3524{
2953 unsigned long iflags;
2954
2955 spin_lock_irqsave(phba->host->host_lock, iflags);
2956 lpfc_sli_release_iocbq(phba, cmdiocb); 3525 lpfc_sli_release_iocbq(phba, cmdiocb);
2957 spin_unlock_irqrestore(phba->host->host_lock, iflags);
2958 return; 3526 return;
2959} 3527}
2960 3528
@@ -2972,8 +3540,8 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2972 for (i = 1; i <= phba->sli.last_iotag; i++) { 3540 for (i = 1; i <= phba->sli.last_iotag; i++) {
2973 iocbq = phba->sli.iocbq_lookup[i]; 3541 iocbq = phba->sli.iocbq_lookup[i];
2974 3542
2975 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id, 3543 if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id, 0,
2976 0, abort_cmd) != 0) 3544 abort_cmd) != 0)
2977 continue; 3545 continue;
2978 3546
2979 /* issue ABTS for this IOCB based on iotag */ 3547 /* issue ABTS for this IOCB based on iotag */
@@ -2989,8 +3557,9 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2989 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 3557 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
2990 abtsiocb->iocb.ulpLe = 1; 3558 abtsiocb->iocb.ulpLe = 1;
2991 abtsiocb->iocb.ulpClass = cmd->ulpClass; 3559 abtsiocb->iocb.ulpClass = cmd->ulpClass;
3560 abtsiocb->vport = phba->pport;
2992 3561
2993 if (phba->hba_state >= LPFC_LINK_UP) 3562 if (lpfc_is_link_up(phba))
2994 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 3563 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
2995 else 3564 else
2996 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 3565 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
@@ -3016,16 +3585,16 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3016 wait_queue_head_t *pdone_q; 3585 wait_queue_head_t *pdone_q;
3017 unsigned long iflags; 3586 unsigned long iflags;
3018 3587
3019 spin_lock_irqsave(phba->host->host_lock, iflags); 3588 spin_lock_irqsave(&phba->hbalock, iflags);
3020 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 3589 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3021 if (cmdiocbq->context2 && rspiocbq) 3590 if (cmdiocbq->context2 && rspiocbq)
3022 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 3591 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3023 &rspiocbq->iocb, sizeof(IOCB_t)); 3592 &rspiocbq->iocb, sizeof(IOCB_t));
3024 3593
3025 pdone_q = cmdiocbq->context_un.wait_queue; 3594 pdone_q = cmdiocbq->context_un.wait_queue;
3026 spin_unlock_irqrestore(phba->host->host_lock, iflags);
3027 if (pdone_q) 3595 if (pdone_q)
3028 wake_up(pdone_q); 3596 wake_up(pdone_q);
3597 spin_unlock_irqrestore(&phba->hbalock, iflags);
3029 return; 3598 return;
3030} 3599}
3031 3600
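The hunks above keep the wake-up handshake but move it from the host lock to the hbalock: the completion side copies the response into the caller-supplied iocb, sets the LPFC_IO_WAKE flag under the lock and wakes the waiter, while the issuing side performs a timed wait on that flag. A user-space model of the same handshake, with POSIX threads standing in for the HBA interrupt; none of these names are lpfc's.

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define IO_WAKE 0x2

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static unsigned int iocb_flag;
static char response[32];

static void *completion_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	strcpy(response, "GOOD_STATUS");	/* copy rsp before waking */
	iocb_flag |= IO_WAKE;
	pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t irq;
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;			/* crude stand-in for timeout * HZ */

	pthread_create(&irq, NULL, completion_path, NULL);

	pthread_mutex_lock(&lock);
	while (!(iocb_flag & IO_WAKE)) {
		if (pthread_cond_timedwait(&done, &lock, &deadline))
			break;			/* timed out, no response arrived */
	}
	if (iocb_flag & IO_WAKE)
		printf("waiter got response: %s\n", response);
	else
		printf("waiter timed out\n");
	pthread_mutex_unlock(&lock);

	pthread_join(irq, NULL);
	return 0;
}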
@@ -3035,11 +3604,12 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3035 * lpfc_sli_issue_call since the wake routine sets a unique value and by 3604 * lpfc_sli_issue_call since the wake routine sets a unique value and by
3036 * definition this is a wait function. 3605 * definition this is a wait function.
3037 */ 3606 */
3607
3038int 3608int
3039lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, 3609lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3040 struct lpfc_sli_ring * pring, 3610 struct lpfc_sli_ring *pring,
3041 struct lpfc_iocbq * piocb, 3611 struct lpfc_iocbq *piocb,
3042 struct lpfc_iocbq * prspiocbq, 3612 struct lpfc_iocbq *prspiocbq,
3043 uint32_t timeout) 3613 uint32_t timeout)
3044{ 3614{
3045 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 3615 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
@@ -3071,11 +3641,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
3071 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 3641 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3072 if (retval == IOCB_SUCCESS) { 3642 if (retval == IOCB_SUCCESS) {
3073 timeout_req = timeout * HZ; 3643 timeout_req = timeout * HZ;
3074 spin_unlock_irq(phba->host->host_lock);
3075 timeleft = wait_event_timeout(done_q, 3644 timeleft = wait_event_timeout(done_q,
3076 piocb->iocb_flag & LPFC_IO_WAKE, 3645 piocb->iocb_flag & LPFC_IO_WAKE,
3077 timeout_req); 3646 timeout_req);
3078 spin_lock_irq(phba->host->host_lock);
3079 3647
3080 if (piocb->iocb_flag & LPFC_IO_WAKE) { 3648 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3081 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3649 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3117,16 +3685,16 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
3117} 3685}
3118 3686
3119int 3687int
3120lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 3688lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3121 uint32_t timeout) 3689 uint32_t timeout)
3122{ 3690{
3123 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 3691 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3124 int retval; 3692 int retval;
3693 unsigned long flag;
3125 3694
3126 /* The caller must leave context1 empty. */ 3695 /* The caller must leave context1 empty. */
3127 if (pmboxq->context1 != 0) { 3696 if (pmboxq->context1 != 0)
3128 return (MBX_NOT_FINISHED); 3697 return MBX_NOT_FINISHED;
3129 }
3130 3698
3131 /* setup wake call as IOCB callback */ 3699 /* setup wake call as IOCB callback */
3132 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 3700 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
@@ -3141,6 +3709,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3141 pmboxq->mbox_flag & LPFC_MBX_WAKE, 3709 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3142 timeout * HZ); 3710 timeout * HZ);
3143 3711
3712 spin_lock_irqsave(&phba->hbalock, flag);
3144 pmboxq->context1 = NULL; 3713 pmboxq->context1 = NULL;
3145 /* 3714 /*
3146 * if LPFC_MBX_WAKE flag is set the mailbox is completed 3715 * if LPFC_MBX_WAKE flag is set the mailbox is completed
@@ -3148,8 +3717,11 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3148 */ 3717 */
3149 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) 3718 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
3150 retval = MBX_SUCCESS; 3719 retval = MBX_SUCCESS;
3151 else 3720 else {
3152 retval = MBX_TIMEOUT; 3721 retval = MBX_TIMEOUT;
3722 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3723 }
3724 spin_unlock_irqrestore(&phba->hbalock, flag);
3153 } 3725 }
3154 3726
3155 return retval; 3727 return retval;
@@ -3158,14 +3730,27 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3158int 3730int
3159lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 3731lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3160{ 3732{
3733 struct lpfc_vport *vport = phba->pport;
3161 int i = 0; 3734 int i = 0;
3735 uint32_t ha_copy;
3162 3736
3163 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) { 3737 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
3164 if (i++ > LPFC_MBOX_TMO * 1000) 3738 if (i++ > LPFC_MBOX_TMO * 1000)
3165 return 1; 3739 return 1;
3166 3740
3167 if (lpfc_sli_handle_mb_event(phba) == 0) 3741 /*
3168 i = 0; 3742 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
3743 * did finish. This way we won't get the misleading
3744 * "Stray Mailbox Interrupt" message.
3745 */
3746 spin_lock_irq(&phba->hbalock);
3747 ha_copy = phba->work_ha;
3748 phba->work_ha &= ~HA_MBATT;
3749 spin_unlock_irq(&phba->hbalock);
3750
3751 if (ha_copy & HA_MBATT)
3752 if (lpfc_sli_handle_mb_event(phba) == 0)
3753 i = 0;
3169 3754
3170 msleep(1); 3755 msleep(1);
3171 } 3756 }
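The rewritten flush loop only calls the mailbox event handler when the host-attention word actually had HA_MBATT set, and it clears that bit while holding the lock so the same completion is consumed exactly once; that is what avoids the misleading "Stray Mailbox Interrupt" message. A user-space sketch of that locked test-and-clear, with an invented bit value:

#include <pthread.h>
#include <stdio.h>

#define HA_MBATT 0x10			/* illustrative value only */

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int work_ha = HA_MBATT;	/* pretend one mailbox completed */

static void handle_mb_event(void)
{
	printf("mailbox completion processed\n");
}

int main(void)
{
	for (int pass = 0; pass < 2; pass++) {
		unsigned int ha_copy;

		pthread_mutex_lock(&hbalock);
		ha_copy = work_ha;
		work_ha &= ~HA_MBATT;	/* consume the event exactly once */
		pthread_mutex_unlock(&hbalock);

		if (ha_copy & HA_MBATT)
			handle_mb_event();
		else
			printf("pass %d: no mailbox attention, nothing to do\n",
			       pass);
	}
	return 0;
}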
@@ -3176,13 +3761,20 @@ lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3176irqreturn_t 3761irqreturn_t
3177lpfc_intr_handler(int irq, void *dev_id) 3762lpfc_intr_handler(int irq, void *dev_id)
3178{ 3763{
3179 struct lpfc_hba *phba; 3764 struct lpfc_hba *phba;
3180 uint32_t ha_copy; 3765 uint32_t ha_copy;
3181 uint32_t work_ha_copy; 3766 uint32_t work_ha_copy;
3182 unsigned long status; 3767 unsigned long status;
3183 int i; 3768 int i;
3184 uint32_t control; 3769 uint32_t control;
3185 3770
3771 MAILBOX_t *mbox, *pmbox;
3772 struct lpfc_vport *vport;
3773 struct lpfc_nodelist *ndlp;
3774 struct lpfc_dmabuf *mp;
3775 LPFC_MBOXQ_t *pmb;
3776 int rc;
3777
3186 /* 3778 /*
3187 * Get the driver's phba structure from the dev_id and 3779 * Get the driver's phba structure from the dev_id and
3188 * assume the HBA is not interrupting. 3780 * assume the HBA is not interrupting.
@@ -3204,7 +3796,7 @@ lpfc_intr_handler(int irq, void *dev_id)
3204 */ 3796 */
3205 3797
3206 /* Ignore all interrupts during initialization. */ 3798 /* Ignore all interrupts during initialization. */
3207 if (unlikely(phba->hba_state < LPFC_LINK_DOWN)) 3799 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
3208 return IRQ_NONE; 3800 return IRQ_NONE;
3209 3801
3210 /* 3802 /*
@@ -3212,16 +3804,16 @@ lpfc_intr_handler(int irq, void *dev_id)
3212 * Clear Attention Sources, except Error Attention (to 3804 * Clear Attention Sources, except Error Attention (to
3213 * preserve status) and Link Attention 3805 * preserve status) and Link Attention
3214 */ 3806 */
3215 spin_lock(phba->host->host_lock); 3807 spin_lock(&phba->hbalock);
3216 ha_copy = readl(phba->HAregaddr); 3808 ha_copy = readl(phba->HAregaddr);
3217 /* If somebody is waiting to handle an eratt don't process it 3809 /* If somebody is waiting to handle an eratt don't process it
3218 * here. The brdkill function will do this. 3810 * here. The brdkill function will do this.
3219 */ 3811 */
3220 if (phba->fc_flag & FC_IGNORE_ERATT) 3812 if (phba->link_flag & LS_IGNORE_ERATT)
3221 ha_copy &= ~HA_ERATT; 3813 ha_copy &= ~HA_ERATT;
3222 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 3814 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3223 readl(phba->HAregaddr); /* flush */ 3815 readl(phba->HAregaddr); /* flush */
3224 spin_unlock(phba->host->host_lock); 3816 spin_unlock(&phba->hbalock);
3225 3817
3226 if (unlikely(!ha_copy)) 3818 if (unlikely(!ha_copy))
3227 return IRQ_NONE; 3819 return IRQ_NONE;
@@ -3235,36 +3827,41 @@ lpfc_intr_handler(int irq, void *dev_id)
3235 * Turn off Link Attention interrupts 3827 * Turn off Link Attention interrupts
3236 * until CLEAR_LA done 3828 * until CLEAR_LA done
3237 */ 3829 */
3238 spin_lock(phba->host->host_lock); 3830 spin_lock(&phba->hbalock);
3239 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 3831 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3240 control = readl(phba->HCregaddr); 3832 control = readl(phba->HCregaddr);
3241 control &= ~HC_LAINT_ENA; 3833 control &= ~HC_LAINT_ENA;
3242 writel(control, phba->HCregaddr); 3834 writel(control, phba->HCregaddr);
3243 readl(phba->HCregaddr); /* flush */ 3835 readl(phba->HCregaddr); /* flush */
3244 spin_unlock(phba->host->host_lock); 3836 spin_unlock(&phba->hbalock);
3245 } 3837 }
3246 else 3838 else
3247 work_ha_copy &= ~HA_LATT; 3839 work_ha_copy &= ~HA_LATT;
3248 } 3840 }
3249 3841
3250 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) { 3842 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
3251 for (i = 0; i < phba->sli.num_rings; i++) { 3843 /*
3252 if (work_ha_copy & (HA_RXATT << (4*i))) { 3844 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
3253 /* 3845 * the only slow ring.
3254 * Turn off Slow Rings interrupts 3846 */
3255 */ 3847 status = (work_ha_copy &
3256 spin_lock(phba->host->host_lock); 3848 (HA_RXMASK << (4*LPFC_ELS_RING)));
3257 control = readl(phba->HCregaddr); 3849 status >>= (4*LPFC_ELS_RING);
3258 control &= ~(HC_R0INT_ENA << i); 3850 if (status & HA_RXMASK) {
3851 spin_lock(&phba->hbalock);
3852 control = readl(phba->HCregaddr);
3853 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
3854 control &=
3855 ~(HC_R0INT_ENA << LPFC_ELS_RING);
3259 writel(control, phba->HCregaddr); 3856 writel(control, phba->HCregaddr);
3260 readl(phba->HCregaddr); /* flush */ 3857 readl(phba->HCregaddr); /* flush */
3261 spin_unlock(phba->host->host_lock);
3262 } 3858 }
3859 spin_unlock(&phba->hbalock);
3263 } 3860 }
3264 } 3861 }
3265 3862
3266 if (work_ha_copy & HA_ERATT) { 3863 if (work_ha_copy & HA_ERATT) {
3267 phba->hba_state = LPFC_HBA_ERROR; 3864 phba->link_state = LPFC_HBA_ERROR;
3268 /* 3865 /*
3269 * There was a link/board error. Read the 3866 * There was a link/board error. Read the
3270 * status register to retrieve the error event 3867 * status register to retrieve the error event
@@ -3279,14 +3876,108 @@ lpfc_intr_handler(int irq, void *dev_id)
3279 /* Clear Chip error bit */ 3876 /* Clear Chip error bit */
3280 writel(HA_ERATT, phba->HAregaddr); 3877 writel(HA_ERATT, phba->HAregaddr);
3281 readl(phba->HAregaddr); /* flush */ 3878 readl(phba->HAregaddr); /* flush */
3282 phba->stopped = 1; 3879 phba->pport->stopped = 1;
3880 }
3881
3882 if ((work_ha_copy & HA_MBATT) &&
3883 (phba->sli.mbox_active)) {
3884 pmb = phba->sli.mbox_active;
3885 pmbox = &pmb->mb;
3886 mbox = &phba->slim2p->mbx;
3887 vport = pmb->vport;
3888
3889 /* First check out the status word */
3890 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
3891 if (pmbox->mbxOwner != OWN_HOST) {
3892 /*
3893 * Stray Mailbox Interrupt, mbxCommand <cmd>
3894 * mbxStatus <status>
3895 */
3896 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
3897 LOG_SLI,
3898 "%d (%d):0304 Stray Mailbox "
3899 "Interrupt mbxCommand x%x "
3900 "mbxStatus x%x\n",
3901 phba->brd_no,
3902 (vport
3903 ? vport->vpi : 0),
3904 pmbox->mbxCommand,
3905 pmbox->mbxStatus);
3906 }
3907 phba->last_completion_time = jiffies;
3908 del_timer_sync(&phba->sli.mbox_tmo);
3909
3910 phba->sli.mbox_active = NULL;
3911 if (pmb->mbox_cmpl) {
3912 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3913 MAILBOX_CMD_SIZE);
3914 }
3915 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
3916 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
3917
3918 lpfc_debugfs_disc_trc(vport,
3919 LPFC_DISC_TRC_MBOX_VPORT,
3920 "MBOX dflt rpi: : status:x%x rpi:x%x",
3921 (uint32_t)pmbox->mbxStatus,
3922 pmbox->un.varWords[0], 0);
3923
3924 if ( !pmbox->mbxStatus) {
3925 mp = (struct lpfc_dmabuf *)
3926 (pmb->context1);
3927 ndlp = (struct lpfc_nodelist *)
3928 pmb->context2;
3929
3930 /* Reg_LOGIN of dflt RPI was successful.
3931 * new lets get rid of the RPI using the
3932 * same mbox buffer.
3933 */
3934 lpfc_unreg_login(phba, vport->vpi,
3935 pmbox->un.varWords[0], pmb);
3936 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3937 pmb->context1 = mp;
3938 pmb->context2 = ndlp;
3939 pmb->vport = vport;
3940 spin_lock(&phba->hbalock);
3941 phba->sli.sli_flag &=
3942 ~LPFC_SLI_MBOX_ACTIVE;
3943 spin_unlock(&phba->hbalock);
3944 goto send_current_mbox;
3945 }
3946 }
3947 spin_lock(&phba->pport->work_port_lock);
3948 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3949 spin_unlock(&phba->pport->work_port_lock);
3950 lpfc_mbox_cmpl_put(phba, pmb);
3951 }
3952 if ((work_ha_copy & HA_MBATT) &&
3953 (phba->sli.mbox_active == NULL)) {
3954send_next_mbox:
3955 spin_lock(&phba->hbalock);
3956 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3957 pmb = lpfc_mbox_get(phba);
3958 spin_unlock(&phba->hbalock);
3959send_current_mbox:
3960 /* Process next mailbox command if there is one */
3961 if (pmb != NULL) {
3962 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3963 if (rc == MBX_NOT_FINISHED) {
3964 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3965 lpfc_mbox_cmpl_put(phba, pmb);
3966 goto send_next_mbox;
3967 }
3968 } else {
3969 /* Turn on IOCB processing */
3970 for (i = 0; i < phba->sli.num_rings; i++)
3971 lpfc_sli_turn_on_ring(phba, i);
3972 }
3973
3283 } 3974 }
3284 3975
3285 spin_lock(phba->host->host_lock); 3976 spin_lock(&phba->hbalock);
3286 phba->work_ha |= work_ha_copy; 3977 phba->work_ha |= work_ha_copy;
3287 if (phba->work_wait) 3978 if (phba->work_wait)
3288 wake_up(phba->work_wait); 3979 lpfc_worker_wake_up(phba);
3289 spin_unlock(phba->host->host_lock); 3980 spin_unlock(&phba->hbalock);
3290 } 3981 }
3291 3982
3292 ha_copy &= ~(phba->work_ha_mask); 3983 ha_copy &= ~(phba->work_ha_mask);
@@ -3298,7 +3989,7 @@ lpfc_intr_handler(int irq, void *dev_id)
3298 */ 3989 */
3299 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 3990 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
3300 status >>= (4*LPFC_FCP_RING); 3991 status >>= (4*LPFC_FCP_RING);
3301 if (status & HA_RXATT) 3992 if (status & HA_RXMASK)
3302 lpfc_sli_handle_fast_ring_event(phba, 3993 lpfc_sli_handle_fast_ring_event(phba,
3303 &phba->sli.ring[LPFC_FCP_RING], 3994 &phba->sli.ring[LPFC_FCP_RING],
3304 status); 3995 status);
@@ -3311,7 +4002,7 @@ lpfc_intr_handler(int irq, void *dev_id)
3311 */ 4002 */
3312 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 4003 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
3313 status >>= (4*LPFC_EXTRA_RING); 4004 status >>= (4*LPFC_EXTRA_RING);
3314 if (status & HA_RXATT) { 4005 if (status & HA_RXMASK) {
3315 lpfc_sli_handle_fast_ring_event(phba, 4006 lpfc_sli_handle_fast_ring_event(phba,
3316 &phba->sli.ring[LPFC_EXTRA_RING], 4007 &phba->sli.ring[LPFC_EXTRA_RING],
3317 status); 4008 status);
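Several hunks in lpfc_intr_handler switch from testing HA_RXATT to masking with HA_RXMASK after shifting the host-attention word down by four bits per ring, so all of a ring's attention bits are considered at once. A sketch of that extraction with an invented bit layout (four attention bits per ring, ring N occupying bits 4N..4N+3); the constant value below is illustrative, not the driver's:

#include <stdio.h>

#define HA_RXMASK 0xf			/* illustrative: 4 attention bits per ring */

int main(void)
{
	unsigned int ha_copy = 0x00500030;	/* made-up attention word */

	for (int ring = 0; ring < 4; ring++) {
		unsigned int status = (ha_copy & (HA_RXMASK << (4 * ring)))
				      >> (4 * ring);
		if (status & HA_RXMASK)
			printf("ring %d: attention bits 0x%x -> handle events\n",
			       ring, status);
		else
			printf("ring %d: idle\n", ring);
	}
	return 0;
}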
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 41c38d324ab0..76058505795e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -20,6 +20,7 @@
20 20
21/* forward declaration for LPFC_IOCB_t's use */ 21/* forward declaration for LPFC_IOCB_t's use */
22struct lpfc_hba; 22struct lpfc_hba;
23struct lpfc_vport;
23 24
24/* Define the context types that SLI handles for abort and sums. */ 25/* Define the context types that SLI handles for abort and sums. */
25typedef enum _lpfc_ctx_cmd { 26typedef enum _lpfc_ctx_cmd {
@@ -43,10 +44,12 @@ struct lpfc_iocbq {
43#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ 44#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
44#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ 45#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
45#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 46#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
47#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
46 48
47 uint8_t abort_count; 49 uint8_t abort_count;
48 uint8_t rsvd2; 50 uint8_t rsvd2;
49 uint32_t drvrTimeout; /* driver timeout in seconds */ 51 uint32_t drvrTimeout; /* driver timeout in seconds */
52 struct lpfc_vport *vport;/* virtual port pointer */
50 void *context1; /* caller context information */ 53 void *context1; /* caller context information */
51 void *context2; /* caller context information */ 54 void *context2; /* caller context information */
52 void *context3; /* caller context information */ 55 void *context3; /* caller context information */
@@ -56,6 +59,8 @@ struct lpfc_iocbq {
56 struct lpfcMboxq *mbox; 59 struct lpfcMboxq *mbox;
57 } context_un; 60 } context_un;
58 61
62 void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
63 struct lpfc_iocbq *);
59 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 64 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
60 struct lpfc_iocbq *); 65 struct lpfc_iocbq *);
61 66
@@ -68,12 +73,14 @@ struct lpfc_iocbq {
68#define IOCB_ERROR 2 73#define IOCB_ERROR 2
69#define IOCB_TIMEDOUT 3 74#define IOCB_TIMEDOUT 3
70 75
71#define LPFC_MBX_WAKE 1 76#define LPFC_MBX_WAKE 1
77#define LPFC_MBX_IMED_UNREG 2
72 78
73typedef struct lpfcMboxq { 79typedef struct lpfcMboxq {
74 /* MBOXQs are used in single linked lists */ 80 /* MBOXQs are used in single linked lists */
75 struct list_head list; /* ptr to next mailbox command */ 81 struct list_head list; /* ptr to next mailbox command */
76 MAILBOX_t mb; /* Mailbox cmd */ 82 MAILBOX_t mb; /* Mailbox cmd */
 83 struct lpfc_vport *vport;/* virtual port pointer */
77 void *context1; /* caller context information */ 84 void *context1; /* caller context information */
78 void *context2; /* caller context information */ 85 void *context2; /* caller context information */
79 86
@@ -135,6 +142,8 @@ struct lpfc_sli_ring {
135 uint8_t ringno; /* ring number */ 142 uint8_t ringno; /* ring number */
136 uint16_t numCiocb; /* number of command iocb's per ring */ 143 uint16_t numCiocb; /* number of command iocb's per ring */
137 uint16_t numRiocb; /* number of rsp iocb's per ring */ 144 uint16_t numRiocb; /* number of rsp iocb's per ring */
145 uint16_t sizeCiocb; /* Size of command iocb's in this ring */
146 uint16_t sizeRiocb; /* Size of response iocb's in this ring */
138 147
139 uint32_t fast_iotag; /* max fastlookup based iotag */ 148 uint32_t fast_iotag; /* max fastlookup based iotag */
140 uint32_t iotag_ctr; /* keeps track of the next iotag to use */ 149 uint32_t iotag_ctr; /* keeps track of the next iotag to use */
@@ -165,6 +174,34 @@ struct lpfc_sli_ring {
165 struct lpfc_sli_ring *); 174 struct lpfc_sli_ring *);
166}; 175};
167 176
177/* Structure used for configuring rings to a specific profile or rctl / type */
178struct lpfc_hbq_init {
179 uint32_t rn; /* Receive buffer notification */
180 uint32_t entry_count; /* max # of entries in HBQ */
181 uint32_t headerLen; /* 0 if not profile 4 or 5 */
182 uint32_t logEntry; /* Set to 1 if this HBQ used for LogEntry */
183 uint32_t profile; /* Selection profile 0=all, 7=logentry */
184 uint32_t ring_mask; /* Binds HBQ to a ring e.g. Ring0=b0001,
185 * ring2=b0100 */
186 uint32_t hbq_index; /* index of this hbq in ring .HBQs[] */
187
188 uint32_t seqlenoff;
189 uint32_t maxlen;
190 uint32_t seqlenbcnt;
191 uint32_t cmdcodeoff;
192 uint32_t cmdmatch[8];
193 uint32_t mask_count; /* number of mask entries in prt array */
194 struct hbq_mask hbqMasks[6];
195
196 /* Non-config rings fields to keep track of buffer allocations */
197 uint32_t buffer_count; /* number of buffers allocated */
198 uint32_t init_count; /* number to allocate when initialized */
199 uint32_t add_count; /* number to allocate when starved */
200} ;
201
202#define LPFC_MAX_HBQ 16
203
204
168/* Structure used to hold SLI statistical counters and info */ 205/* Structure used to hold SLI statistical counters and info */
169struct lpfc_sli_stat { 206struct lpfc_sli_stat {
170 uint64_t mbox_stat_err; /* Mbox cmds completed status error */ 207 uint64_t mbox_stat_err; /* Mbox cmds completed status error */
@@ -197,6 +234,7 @@ struct lpfc_sli {
197#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ 234#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
198#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ 235#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
199#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ 236#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
237#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
200 238
201 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 239 struct lpfc_sli_ring ring[LPFC_MAX_RING];
202 int fcp_ring; /* ring used for FCP initiator commands */ 240 int fcp_ring; /* ring used for FCP initiator commands */
@@ -209,6 +247,7 @@ struct lpfc_sli {
209 uint16_t mboxq_cnt; /* current length of queue */ 247 uint16_t mboxq_cnt; /* current length of queue */
210 uint16_t mboxq_max; /* max length */ 248 uint16_t mboxq_max; /* max length */
211 LPFC_MBOXQ_t *mbox_active; /* active mboxq information */ 249 LPFC_MBOXQ_t *mbox_active; /* active mboxq information */
250 struct list_head mboxq_cmpl;
212 251
213 struct timer_list mbox_tmo; /* Hold clk to timeout active mbox 252 struct timer_list mbox_tmo; /* Hold clk to timeout active mbox
214 cmd */ 253 cmd */
@@ -221,12 +260,6 @@ struct lpfc_sli {
221 struct lpfc_lnk_stat lnk_stat_offsets; 260 struct lpfc_lnk_stat lnk_stat_offsets;
222}; 261};
223 262
224/* Given a pointer to the start of the ring, and the slot number of
225 * the desired iocb entry, calc a pointer to that entry.
226 * (assume iocb entry size is 32 bytes, or 8 words)
227 */
228#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))
229
230#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 263#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
231 command */ 264 command */
232#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 265#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 92a9107019d2..a5bc79eef052 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.12" 21#define LPFC_DRIVER_VERSION "8.2.1"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
new file mode 100644
index 000000000000..85797dbf5478
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -0,0 +1,523 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw.h"
36#include "lpfc_sli.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_version.h"
43#include "lpfc_vport.h"
44
45inline void lpfc_vport_set_state(struct lpfc_vport *vport,
46 enum fc_vport_state new_state)
47{
48 struct fc_vport *fc_vport = vport->fc_vport;
49
50 if (fc_vport) {
51 /*
 52 * When the transport defines fc_vport_set_state we will replace
53 * this code with the following line
54 */
55 /* fc_vport_set_state(fc_vport, new_state); */
56 if (new_state != FC_VPORT_INITIALIZING)
57 fc_vport->vport_last_state = fc_vport->vport_state;
58 fc_vport->vport_state = new_state;
59 }
60
 61 /* for all the error states we will set the internal state to FAILED */
62 switch (new_state) {
63 case FC_VPORT_NO_FABRIC_SUPP:
64 case FC_VPORT_NO_FABRIC_RSCS:
65 case FC_VPORT_FABRIC_LOGOUT:
66 case FC_VPORT_FABRIC_REJ_WWN:
67 case FC_VPORT_FAILED:
68 vport->port_state = LPFC_VPORT_FAILED;
69 break;
70 case FC_VPORT_LINKDOWN:
71 vport->port_state = LPFC_VPORT_UNKNOWN;
72 break;
73 default:
74 /* do nothing */
75 break;
76 }
77}
78
79static int
80lpfc_alloc_vpi(struct lpfc_hba *phba)
81{
82 int vpi;
83
84 spin_lock_irq(&phba->hbalock);
85 /* Start at bit 1 because vpi zero is reserved for the physical port */
86 vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
87 if (vpi > phba->max_vpi)
88 vpi = 0;
89 else
90 set_bit(vpi, phba->vpi_bmask);
91 spin_unlock_irq(&phba->hbalock);
92 return vpi;
93}
94
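lpfc_alloc_vpi hands out virtual-port indexes from a bitmap, starting the search at bit 1 because VPI 0 is reserved for the physical port, and returns 0 when the range is exhausted. A user-space sketch of the same first-fit allocation, using a plain array instead of the kernel bitmap helpers and a made-up maximum:

#include <stdio.h>
#include <string.h>

#define MAX_VPI 7			/* small made-up limit for the sketch */

static unsigned char vpi_bmask[MAX_VPI + 1];

/* first-fit allocator; VPI 0 belongs to the physical port */
static int alloc_vpi(void)
{
	for (int vpi = 1; vpi <= MAX_VPI; vpi++) {
		if (!vpi_bmask[vpi]) {
			vpi_bmask[vpi] = 1;
			return vpi;
		}
	}
	return 0;			/* 0 means "no VPI available" */
}

static void free_vpi(int vpi)
{
	vpi_bmask[vpi] = 0;
}

int main(void)
{
	memset(vpi_bmask, 0, sizeof(vpi_bmask));
	for (int i = 0; i <= MAX_VPI; i++)
		printf("allocated vpi %d\n", alloc_vpi());
	free_vpi(3);
	printf("after freeing 3, next vpi is %d\n", alloc_vpi());
	return 0;
}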
95static void
96lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
97{
98 spin_lock_irq(&phba->hbalock);
99 clear_bit(vpi, phba->vpi_bmask);
100 spin_unlock_irq(&phba->hbalock);
101}
102
103static int
104lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
105{
106 LPFC_MBOXQ_t *pmb;
107 MAILBOX_t *mb;
108 struct lpfc_dmabuf *mp;
109 int rc;
110
111 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
112 if (!pmb) {
113 return -ENOMEM;
114 }
115 mb = &pmb->mb;
116
117 lpfc_read_sparam(phba, pmb, vport->vpi);
118 /*
119 * Grab buffer pointer and clear context1 so we can use
 120 * lpfc_sli_issue_mbox_wait
121 */
122 mp = (struct lpfc_dmabuf *) pmb->context1;
123 pmb->context1 = NULL;
124
125 pmb->vport = vport;
126 rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
127 if (rc != MBX_SUCCESS) {
128 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
129 "%d (%d):1818 VPort failed init, mbxCmd x%x "
130 "READ_SPARM mbxStatus x%x, rc = x%x\n",
131 phba->brd_no, vport->vpi,
132 mb->mbxCommand, mb->mbxStatus, rc);
133 lpfc_mbuf_free(phba, mp->virt, mp->phys);
134 kfree(mp);
135 if (rc != MBX_TIMEOUT)
136 mempool_free(pmb, phba->mbox_mem_pool);
137 return -EIO;
138 }
139
140 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
141 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
142 sizeof (struct lpfc_name));
143 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
144 sizeof (struct lpfc_name));
145
146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
147 kfree(mp);
148 mempool_free(pmb, phba->mbox_mem_pool);
149
150 return 0;
151}
152
153static int
154lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
155 const char *name_type)
156{
157 /* ensure that IEEE format 1 addresses
158 * contain zeros in bits 59-48
159 */
160 if (!((wwn->u.wwn[0] >> 4) == 1 &&
161 ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
162 return 1;
163
164 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
165 "%d:1822 Invalid %s: %02x:%02x:%02x:%02x:"
166 "%02x:%02x:%02x:%02x\n",
167 phba->brd_no, name_type,
168 wwn->u.wwn[0], wwn->u.wwn[1],
169 wwn->u.wwn[2], wwn->u.wwn[3],
170 wwn->u.wwn[4], wwn->u.wwn[5],
171 wwn->u.wwn[6], wwn->u.wwn[7]);
172 return 0;
173}
174
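lpfc_valid_wwn_format rejects a name only when the NAA nibble (the high four bits of byte 0) says "IEEE format 1" but the nibbles that format requires to be zero are set. A small sketch that decodes the NAA nibble the same way against sample WWNs; the helper mirrors the shape of the check above and is illustrative, not the driver's code verbatim:

#include <stdio.h>

/* Return 1 if the WWN looks acceptable: an IEEE format 1 (NAA 1) name must
 * carry zeros in the reserved nibbles checked here.
 */
static int wwn_ok(const unsigned char wwn[8])
{
	unsigned int naa = wwn[0] >> 4;

	if (naa == 1 && ((wwn[0] & 0xf) || (wwn[1] & 0xf)))
		return 0;
	return 1;
}

int main(void)
{
	const unsigned char good[8] = { 0x10, 0x00, 0x00, 0x00,
					0xc9, 0x12, 0x34, 0x56 };
	const unsigned char bad[8]  = { 0x1f, 0x0f, 0x00, 0x00,
					0xc9, 0x12, 0x34, 0x56 };

	printf("good wwn -> %s\n", wwn_ok(good) ? "accepted" : "rejected");
	printf("bad wwn  -> %s\n", wwn_ok(bad)  ? "accepted" : "rejected");
	return 0;
}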
175static int
176lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
177{
178 struct lpfc_vport *vport;
179
180 list_for_each_entry(vport, &phba->port_list, listentry) {
181 if (vport == new_vport)
182 continue;
183 /* If they match, return not unique */
184 if (memcmp(&vport->fc_sparam.portName,
185 &new_vport->fc_sparam.portName,
186 sizeof(struct lpfc_name)) == 0)
187 return 0;
188 }
189 return 1;
190}
191
192int
193lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
194{
195 struct lpfc_nodelist *ndlp;
196 struct lpfc_vport *pport =
197 (struct lpfc_vport *) fc_vport->shost->hostdata;
198 struct lpfc_hba *phba = pport->phba;
199 struct lpfc_vport *vport = NULL;
200 int instance;
201 int vpi;
202 int rc = VPORT_ERROR;
203
204 if ((phba->sli_rev < 3) ||
205 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
206 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
207 "%d:1808 Create VPORT failed: "
208 "NPIV is not enabled: SLImode:%d\n",
209 phba->brd_no, phba->sli_rev);
210 rc = VPORT_INVAL;
211 goto error_out;
212 }
213
214 vpi = lpfc_alloc_vpi(phba);
215 if (vpi == 0) {
216 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
217 "%d:1809 Create VPORT failed: "
218 "Max VPORTs (%d) exceeded\n",
219 phba->brd_no, phba->max_vpi);
220 rc = VPORT_NORESOURCES;
221 goto error_out;
222 }
223
224
225 /* Assign an unused board number */
226 if ((instance = lpfc_get_instance()) < 0) {
227 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
228 "%d:1810 Create VPORT failed: Cannot get "
229 "instance number\n", phba->brd_no);
230 lpfc_free_vpi(phba, vpi);
231 rc = VPORT_NORESOURCES;
232 goto error_out;
233 }
234
235 vport = lpfc_create_port(phba, instance, fc_vport);
236 if (!vport) {
237 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
238 "%d:1811 Create VPORT failed: vpi x%x\n",
239 phba->brd_no, vpi);
240 lpfc_free_vpi(phba, vpi);
241 rc = VPORT_NORESOURCES;
242 goto error_out;
243 }
244
245 vport->vpi = vpi;
246 lpfc_debugfs_initialize(vport);
247
248 if (lpfc_vport_sparm(phba, vport)) {
249 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
250 "%d:1813 Create VPORT failed: vpi:%d "
251 "Cannot get sparam\n",
252 phba->brd_no, vpi);
253 lpfc_free_vpi(phba, vpi);
254 destroy_port(vport);
255 rc = VPORT_NORESOURCES;
256 goto error_out;
257 }
258
259 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
260 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
261
262 if (fc_vport->node_name != 0)
263 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
264 if (fc_vport->port_name != 0)
265 u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
266
267 memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
268 memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
269
270 if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
271 !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
272 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
273 "%d:1821 Create VPORT failed: vpi:%d "
274 "Invalid WWN format\n",
275 phba->brd_no, vpi);
276 lpfc_free_vpi(phba, vpi);
277 destroy_port(vport);
278 rc = VPORT_INVAL;
279 goto error_out;
280 }
281
282 if (!lpfc_unique_wwpn(phba, vport)) {
283 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
284 "%d:1823 Create VPORT failed: vpi:%d "
285 "Duplicate WWN on HBA\n",
286 phba->brd_no, vpi);
287 lpfc_free_vpi(phba, vpi);
288 destroy_port(vport);
289 rc = VPORT_INVAL;
290 goto error_out;
291 }
292
293 *(struct lpfc_vport **)fc_vport->dd_data = vport;
294 vport->fc_vport = fc_vport;
295
296 if ((phba->link_state < LPFC_LINK_UP) ||
297 (phba->fc_topology == TOPOLOGY_LOOP)) {
298 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
299 rc = VPORT_OK;
300 goto out;
301 }
302
303 if (disable) {
304 rc = VPORT_OK;
305 goto out;
306 }
307
 308 /* Use the physical node's Fabric NDLP to determine if the link is
309 * up and ready to FDISC.
310 */
311 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
312 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
313 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
314 lpfc_set_disctmo(vport);
315 lpfc_initial_fdisc(vport);
316 } else {
317 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
318 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
319 "%d (%d):0262 No NPIV Fabric "
320 "support\n",
321 phba->brd_no, vport->vpi);
322 }
323 } else {
324 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
325 }
326 rc = VPORT_OK;
327
328out:
329 lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
330error_out:
331 return rc;
332}
333
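lpfc_vport_create ends by recording the new lpfc_vport in the transport-owned fc_vport->dd_data area and keeping a back-pointer in vport->fc_vport, so later entry points such as lpfc_vport_delete can recover the driver object from the transport object. A small sketch of that private-data pattern with hypothetical structures; the real code writes the pointer into a preallocated dd_data area, which the sketch simplifies to a single void pointer.

#include <stdio.h>
#include <stdlib.h>

/* transport-owned object; dd_data is opaque storage for the driver */
struct fake_fc_vport {
	void *dd_data;
};

/* driver-owned object with a back-pointer to the transport object */
struct fake_lpfc_vport {
	int vpi;
	struct fake_fc_vport *fc_vport;
};

int main(void)
{
	struct fake_fc_vport *fcv = calloc(1, sizeof(*fcv));
	struct fake_lpfc_vport *vport = calloc(1, sizeof(*vport));

	vport->vpi = 5;
	/* the create path wires the two objects to each other */
	fcv->dd_data = vport;
	vport->fc_vport = fcv;

	/* a later entry point only receives the transport object */
	struct fake_lpfc_vport *found = fcv->dd_data;
	printf("delete path resolved vpi %d from dd_data\n", found->vpi);

	free(vport);
	free(fcv);
	return 0;
}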
334int
335disable_vport(struct fc_vport *fc_vport)
336{
337 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
338 struct lpfc_hba *phba = vport->phba;
339 struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
340 long timeout;
341
342 ndlp = lpfc_findnode_did(vport, Fabric_DID);
343 if (ndlp && phba->link_state >= LPFC_LINK_UP) {
344 vport->unreg_vpi_cmpl = VPORT_INVAL;
345 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
346 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
347 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
348 timeout = schedule_timeout(timeout);
349 }
350
351 lpfc_sli_host_down(vport);
352
353 /* Mark all nodes for discovery so we can remove them by
354 * calling lpfc_cleanup_rpis(vport, 1)
355 */
356 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
357 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
358 continue;
359 lpfc_disc_state_machine(vport, ndlp, NULL,
360 NLP_EVT_DEVICE_RECOVERY);
361 }
362 lpfc_cleanup_rpis(vport, 1);
363
364 lpfc_stop_vport_timers(vport);
365 lpfc_unreg_all_rpis(vport);
366 lpfc_unreg_default_rpis(vport);
367 /*
368 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
369 * scsi_host_put() to release the vport.
370 */
371 lpfc_mbx_unreg_vpi(vport);
372
373 lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
374 return VPORT_OK;
375}
376
377int
378enable_vport(struct fc_vport *fc_vport)
379{
380 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
381 struct lpfc_hba *phba = vport->phba;
382 struct lpfc_nodelist *ndlp = NULL;
383
384 if ((phba->link_state < LPFC_LINK_UP) ||
385 (phba->fc_topology == TOPOLOGY_LOOP)) {
386 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
387 return VPORT_OK;
388 }
389
390 vport->load_flag |= FC_LOADING;
391 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
392
 393 /* Use the physical node's Fabric NDLP to determine if the link is
394 * up and ready to FDISC.
395 */
396 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
397 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
398 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
399 lpfc_set_disctmo(vport);
400 lpfc_initial_fdisc(vport);
401 } else {
402 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
403 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
404 "%d (%d):0264 No NPIV Fabric "
405 "support\n",
406 phba->brd_no, vport->vpi);
407 }
408 } else {
409 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
410 }
411
412 return VPORT_OK;
413}
414
415int
416lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
417{
418 if (disable)
419 return disable_vport(fc_vport);
420 else
421 return enable_vport(fc_vport);
422}
423
424
425int
426lpfc_vport_delete(struct fc_vport *fc_vport)
427{
428 struct lpfc_nodelist *ndlp = NULL;
429 struct lpfc_nodelist *next_ndlp;
430 struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
431 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
432 struct lpfc_hba *phba = vport->phba;
433 long timeout;
434 int rc = VPORT_ERROR;
435
436 /*
437 * This is a bit of a mess. We want to ensure the shost doesn't get
438 * torn down until we're done with the embedded lpfc_vport structure.
439 *
440 * Beyond holding a reference for this function, we also need a
441 * reference for outstanding I/O requests we schedule during delete
442 * processing. But once we scsi_remove_host() we can no longer obtain
443 * a reference through scsi_host_get().
444 *
445 * So we take two references here. We release one reference at the
446 * bottom of the function -- after delinking the vport. And we
 447 * release the other at the completion of the unreg_vpi that gets
448 * initiated after we've disposed of all other resources associated
449 * with the port.
450 */
451 if (!scsi_host_get(shost) || !scsi_host_get(shost))
452 return VPORT_INVAL;
453
454 if (vport->port_type == LPFC_PHYSICAL_PORT) {
455 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
456 "%d:1812 vport_delete failed: Cannot delete "
457 "physical host\n", phba->brd_no);
458 goto out;
459 }
460
461 vport->load_flag |= FC_UNLOADING;
462
463 kfree(vport->vname);
464 lpfc_debugfs_terminate(vport);
465 fc_remove_host(lpfc_shost_from_vport(vport));
466 scsi_remove_host(lpfc_shost_from_vport(vport));
467
468 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
469 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
470 phba->link_state >= LPFC_LINK_UP) {
471
472 /* First look for the Fabric ndlp */
473 ndlp = lpfc_findnode_did(vport, Fabric_DID);
474 if (!ndlp) {
475 /* Cannot find existing Fabric ndlp, allocate one */
476 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
477 if (!ndlp)
478 goto skip_logo;
479 lpfc_nlp_init(vport, ndlp, Fabric_DID);
480 } else {
481 lpfc_dequeue_node(vport, ndlp);
482 }
483 vport->unreg_vpi_cmpl = VPORT_INVAL;
484 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
485 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
486 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
487 timeout = schedule_timeout(timeout);
488 }
489
490skip_logo:
491 lpfc_sli_host_down(vport);
492
493 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
494 lpfc_disc_state_machine(vport, ndlp, NULL,
495 NLP_EVT_DEVICE_RECOVERY);
496 lpfc_disc_state_machine(vport, ndlp, NULL,
497 NLP_EVT_DEVICE_RM);
498 }
499
500 lpfc_stop_vport_timers(vport);
501 lpfc_unreg_all_rpis(vport);
502 lpfc_unreg_default_rpis(vport);
503 /*
504 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
505 * scsi_host_put() to release the vport.
506 */
507 lpfc_mbx_unreg_vpi(vport);
508
509 lpfc_free_vpi(phba, vport->vpi);
510 vport->work_port_events = 0;
511 spin_lock_irq(&phba->hbalock);
512 list_del_init(&vport->listentry);
513 spin_unlock_irq(&phba->hbalock);
514
515 rc = VPORT_OK;
516out:
517 scsi_host_put(shost);
518 return rc;
519}
520
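The long comment at the top of lpfc_vport_delete explains why two scsi_host_get() references are taken: one covers the delete path itself and is dropped at the bottom of the function, and the other keeps the host alive until the asynchronous unreg_vpi completion releases it. A counting sketch of that "two owners, two puts" pattern, with entirely hypothetical names:

#include <stdio.h>

static int shost_ref;

static void host_get(void) { shost_ref++; }
static void host_put(void) { if (--shost_ref == 0) printf("host freed\n"); }

static void unreg_vpi_cmpl(void)
{
	/* the asynchronous completion drops the second reference */
	host_put();
}

int main(void)
{
	shost_ref = 1;			/* reference held by the midlayer */

	/* vport_delete takes two extra references up front */
	host_get();
	host_get();

	/* ... tear down nodes, timers and RPIs ... */
	host_put();			/* end of vport_delete */

	unreg_vpi_cmpl();		/* fires later */

	host_put();			/* midlayer's final release frees the host */
	return 0;
}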
521
522EXPORT_SYMBOL(lpfc_vport_create);
523EXPORT_SYMBOL(lpfc_vport_delete);
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
new file mode 100644
index 000000000000..f223550f8cba
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -0,0 +1,113 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22#ifndef _H_LPFC_VPORT
23#define _H_LPFC_VPORT
24
25/* API version values (each will be an individual bit) */
26#define VPORT_API_VERSION_1 0x01
27
28/* Values returned via lpfc_vport_getinfo() */
29struct vport_info {
30
31 uint32_t api_versions;
32 uint8_t linktype;
33#define VPORT_TYPE_PHYSICAL 0
34#define VPORT_TYPE_VIRTUAL 1
35
36 uint8_t state;
37#define VPORT_STATE_OFFLINE 0
38#define VPORT_STATE_ACTIVE 1
39#define VPORT_STATE_FAILED 2
40
41 uint8_t fail_reason;
42 uint8_t prev_fail_reason;
43#define VPORT_FAIL_UNKNOWN 0
44#define VPORT_FAIL_LINKDOWN 1
45#define VPORT_FAIL_FAB_UNSUPPORTED 2
46#define VPORT_FAIL_FAB_NORESOURCES 3
47#define VPORT_FAIL_FAB_LOGOUT 4
48#define VPORT_FAIL_ADAP_NORESOURCES 5
49
50 uint8_t node_name[8]; /* WWNN */
51 uint8_t port_name[8]; /* WWPN */
52
53 struct Scsi_Host *shost;
54
55/* Following values are valid only on physical links */
56 uint32_t vports_max;
57 uint32_t vports_inuse;
58 uint32_t rpi_max;
59 uint32_t rpi_inuse;
60#define VPORT_CNT_INVALID 0xFFFFFFFF
61};
62
63/* data used in link creation */
64struct vport_data {
65 uint32_t api_version;
66
67 uint32_t options;
68#define VPORT_OPT_AUTORETRY 0x01
69
70 uint8_t node_name[8]; /* WWNN */
71 uint8_t port_name[8]; /* WWPN */
72
73/*
74 * Upon successful creation, vport_shost will point to the new Scsi_Host
75 * structure for the new virtual link.
76 */
77 struct Scsi_Host *vport_shost;
78};
79
80/* API function return codes */
81#define VPORT_OK 0
82#define VPORT_ERROR -1
83#define VPORT_INVAL -2
84#define VPORT_NOMEM -3
85#define VPORT_NORESOURCES -4
86
87int lpfc_vport_create(struct fc_vport *, bool);
88int lpfc_vport_delete(struct fc_vport *);
89int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
91
92/*
93 * queuecommand VPORT-specific return codes. Specified in the host byte code.
94 * Returned when the virtual link has failed or is not active.
95 */
96#define DID_VPORT_ERROR 0x0f
97
98#define VPORT_INFO 0x1
99#define VPORT_CREATE 0x2
100#define VPORT_DELETE 0x4
101
102struct vport_cmd_tag {
103 uint32_t cmd;
104 struct vport_data cdata;
105 struct vport_info cinfo;
106 void *vport;
107 int vport_num;
108};
109
110void lpfc_vport_set_state(struct lpfc_vport *vport,
111 enum fc_vport_state new_state);
112
113#endif /* H_LPFC_VPORT */