Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h              41
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c        390
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c        2811
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h         110
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h         43
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c           67
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c      11
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h          4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c         354
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    1321
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h           57
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h         399
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       1348
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h        1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c        147
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c           1
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h           22
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c    93
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c        490
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h          1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        1911
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h          34
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h        146
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h       5
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c        15
25 files changed, 7769 insertions, 2053 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index aa10f7951634..565e16dd74fc 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -37,6 +37,9 @@ struct lpfc_sli2_slim;
 	   the NameServer before giving up. */
 #define LPFC_CMD_PER_LUN	3	/* max outstanding cmds per lun */
 #define LPFC_DEFAULT_SG_SEG_CNT	64	/* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128	/* sg element count per scsi
+		cmnd for menlo needs nearly twice as for firmware
+		downloads using bsg */
 #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
 #define LPFC_MAX_PROT_SG_SEG_CNT 4096	/* prot sg element count per scsi cmd*/
@@ -109,7 +112,8 @@ struct hbq_dmabuf {
 	struct lpfc_dmabuf dbuf;
 	uint32_t size;
 	uint32_t tag;
-	struct lpfc_rcqe rcqe;
+	struct lpfc_cq_event cq_event;
+	unsigned long time_stamp;
 };
 
 /* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -201,6 +205,7 @@ struct lpfc_stats {
 	uint32_t elsRcvLIRR;
 	uint32_t elsRcvRPS;
 	uint32_t elsRcvRPL;
+	uint32_t elsRcvRRQ;
 	uint32_t elsXmitFLOGI;
 	uint32_t elsXmitFDISC;
 	uint32_t elsXmitPLOGI;
@@ -289,8 +294,8 @@ struct lpfc_vport {
 
 	uint16_t vpi;
 	uint16_t vfi;
-	uint8_t vfi_state;
-#define LPFC_VFI_REGISTERED	0x1
+	uint8_t vpi_state;
+#define LPFC_VPI_REGISTERED	0x1
 
 	uint32_t fc_flag;	/* FC flags */
 /* Several of these flags are HBA centric and should be moved to
@@ -313,6 +318,9 @@ struct lpfc_vport {
 #define FC_VPORT_NEEDS_REG_VPI	0x80000  /* Needs to have its vpi registered */
 #define FC_RSCN_DEFERRED	0x100000 /* A deferred RSCN being processed */
 #define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
+#define FC_VPORT_CVL_RCVD	0x400000 /* VLink failed due to CVL */
+#define FC_VFI_REGISTERED	0x800000 /* VFI is registered */
+#define FC_FDISC_COMPLETED	0x1000000/* FDISC completed */
 
 	uint32_t ct_flags;
 #define FC_CT_RFF_ID		0x1	 /* RFF_ID accepted by switch */
@@ -405,6 +413,7 @@ struct lpfc_vport {
 	uint8_t stat_data_enabled;
 	uint8_t stat_data_blocked;
 	struct list_head rcv_buffer_list;
+	unsigned long rcv_buffer_time_stamp;
 	uint32_t vport_flag;
 #define STATIC_VPORT	1
 };
@@ -445,6 +454,8 @@ struct unsol_rcv_ct_ctx {
 	uint32_t ctxt_id;
 	uint32_t SID;
 	uint32_t oxid;
+	uint32_t flags;
+#define UNSOL_VALID	0x00000001
 };
 
 struct lpfc_hba {
@@ -496,7 +507,10 @@ struct lpfc_hba {
 		(struct lpfc_hba *);
 	void (*lpfc_stop_port)
 		(struct lpfc_hba *);
-
+	int (*lpfc_hba_init_link)
+		(struct lpfc_hba *);
+	int (*lpfc_hba_down_link)
+		(struct lpfc_hba *);
 
 	/* SLI4 specific HBA data structure */
 	struct lpfc_sli4_hba sli4_hba;
@@ -527,13 +541,16 @@ struct lpfc_hba {
 #define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
 #define DEFER_ERATT		0x2 /* Deferred error attention in progress */
 #define HBA_FCOE_SUPPORT	0x4 /* HBA function supports FCOE */
-#define HBA_RECEIVE_BUFFER	0x8 /* Rcv buffer posted to worker thread */
+#define HBA_SP_QUEUE_EVT	0x8 /* Slow-path qevt posted to worker thread*/
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
 #define FCP_XRI_ABORT_EVENT	0x20
 #define ELS_XRI_ABORT_EVENT	0x40
 #define ASYNC_EVENT		0x80
 #define LINK_DISABLED		0x100 /* Link disabled by user */
 #define FCF_DISC_INPROGRESS	0x200 /* FCF discovery in progress */
+#define HBA_FIP_SUPPORT		0x400 /* FIP support in HBA */
+#define HBA_AER_ENABLED		0x800 /* AER enabled with HBA */
+	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
 
 	MAILBOX_t *mbox;
@@ -551,6 +568,7 @@ struct lpfc_hba {
 	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
 
 	uint32_t fc_eventTag;	/* event tag for link attention */
+	uint32_t link_events;
 
 	/* These fields used to be binfo */
 	uint32_t fc_pref_DID;	/* preferred D_ID */
@@ -604,8 +622,12 @@ struct lpfc_hba {
 	uint32_t cfg_enable_hba_reset;
 	uint32_t cfg_enable_hba_heartbeat;
 	uint32_t cfg_enable_bg;
-	uint32_t cfg_enable_fip;
 	uint32_t cfg_log_verbose;
+	uint32_t cfg_aer_support;
+	uint32_t cfg_suppress_link_up;
+#define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */
+#define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */
+#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -783,10 +805,13 @@ struct lpfc_hba {
 	uint16_t vlan_id;
 	struct list_head fcf_conn_rec_list;
 
-	struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */
+	spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
 	struct list_head ct_ev_waiters;
 	struct unsol_rcv_ct_ctx ct_ctx[64];
 	uint32_t ctx_idx;
+
+	uint8_t menlo_flag;	/* menlo generic flags */
+#define HBA_MENLO_SUPPORT	0x1 /* HBA supports menlo commands */
 };
 
 static inline struct Scsi_Host *
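
Note on the lpfc.h hunk above: lpfc_hba_init_link and lpfc_hba_down_link join the driver's existing table of per-SLI-revision function pointers in struct lpfc_hba (alongside lpfc_stop_port). A minimal sketch of how such pointers are typically wired and dispatched; only the two member names come from this commit, the setup helper and the SLI-specific implementations named here are hypothetical:

	/* Hypothetical wiring sketch: lpfc_sli_hba_init_link and
	 * lpfc_sli4_hba_init_link are assumed names, not from this commit. */
	static void lpfc_setup_link_ops(struct lpfc_hba *phba)
	{
		if (phba->sli_rev == LPFC_SLI_REV4)
			phba->lpfc_hba_init_link = lpfc_sli4_hba_init_link;
		else
			phba->lpfc_hba_init_link = lpfc_sli_hba_init_link;
	}

Callers such as the new lpfc_link_state_store() in lpfc_attr.c below then stay revision-agnostic and simply invoke phba->lpfc_hba_init_link(phba).
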
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index e1a30a16a9fa..1849e33e68f9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -23,12 +23,15 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/aer.h>
+#include <linux/gfp.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -98,6 +101,28 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
 }
 
+/**
+ * lpfc_enable_fip_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	if (phba->hba_flag & HBA_FIP_SUPPORT)
+		return snprintf(buf, PAGE_SIZE, "1\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "0\n");
+}
+
 static ssize_t
 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
 		  char *buf)
@@ -458,6 +483,41 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
 }
 
 /**
+ * lpfc_link_state_store - Transition the link_state on an HBA port
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: one or more lpfc_polling_flags values.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if the buffer is not "up" or "down"
+ * return from link state change function if non-zero
+ * length of the buf on success
+ **/
+static ssize_t
+lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	int status = -EINVAL;
+
+	if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
+			(phba->link_state == LPFC_LINK_DOWN))
+		status = phba->lpfc_hba_init_link(phba);
+	else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
+			(phba->link_state >= LPFC_LINK_UP))
+		status = phba->lpfc_hba_down_link(phba);
+
+	if (status == 0)
+		return strlen(buf);
+	else
+		return status;
+}
+
+/**
  * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
@@ -654,7 +714,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
 * Notes:
 * Assumes any error from lpfc_selective_reset() will be negative.
 * If lpfc_selective_reset() returns zero then the length of the buffer
- * is returned which indicates succcess
+ * is returned which indicates success
 *
 * Returns:
 * -EINVAL if the buffer does not contain the string "selective"
@@ -762,9 +822,15 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
-		status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			return -EINVAL;
+		else
+			status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
 	else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
-		status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			return -EINVAL;
+		else
+			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
 	else
 		return -EINVAL;
 
@@ -1126,6 +1192,9 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
 	if ((val & 0x3) != val)
 		return -EINVAL;
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		val = 0;
+
 	spin_lock_irq(&phba->hbalock);
 
 	old_val = phba->cfg_poll;
@@ -1186,7 +1255,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 	struct Scsi_Host *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_hba *phba = vport->phba;\
-	int val = 0;\
+	uint val = 0;\
 	val = phba->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%d\n",\
 			phba->cfg_##attr);\
@@ -1214,7 +1283,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 	struct Scsi_Host *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_hba *phba = vport->phba;\
-	int val = 0;\
+	uint val = 0;\
 	val = phba->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%#x\n",\
 			phba->cfg_##attr);\
@@ -1241,7 +1310,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 **/
 #define lpfc_param_init(attr, default, minval, maxval) \
 static int \
-lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
+lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
 { \
 	if (val >= minval && val <= maxval) {\
 		phba->cfg_##attr = val;\
@@ -1276,7 +1345,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
 **/
 #define lpfc_param_set(attr, default, minval, maxval) \
 static int \
-lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
+lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
 { \
 	if (val >= minval && val <= maxval) {\
 		phba->cfg_##attr = val;\
@@ -1317,7 +1386,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
 	struct Scsi_Host *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_hba *phba = vport->phba;\
-	int val=0;\
+	uint val = 0;\
 	if (!isdigit(buf[0]))\
 		return -EINVAL;\
 	if (sscanf(buf, "%i", &val) != 1)\
@@ -1349,7 +1418,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 { \
 	struct Scsi_Host *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	int val = 0;\
+	uint val = 0;\
 	val = vport->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
 }
@@ -1376,7 +1445,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 { \
 	struct Scsi_Host *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	int val = 0;\
+	uint val = 0;\
 	val = vport->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
 }
@@ -1401,7 +1470,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 **/
 #define lpfc_vport_param_init(attr, default, minval, maxval) \
 static int \
-lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
+lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
 { \
 	if (val >= minval && val <= maxval) {\
 		vport->cfg_##attr = val;\
@@ -1433,7 +1502,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
 **/
 #define lpfc_vport_param_set(attr, default, minval, maxval) \
 static int \
-lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
+lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
 { \
 	if (val >= minval && val <= maxval) {\
 		vport->cfg_##attr = val;\
@@ -1469,7 +1538,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
 { \
 	struct Scsi_Host *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	int val=0;\
+	uint val = 0;\
 	if (!isdigit(buf[0]))\
 		return -EINVAL;\
 	if (sscanf(buf, "%i", &val) != 1)\
@@ -1482,22 +1551,22 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
 
 
 #define LPFC_ATTR(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_init(name, defval, minval, maxval)
 
 #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
 
 #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
@@ -1507,16 +1576,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
 		   lpfc_##name##_show, lpfc_##name##_store)
 
 #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_hex_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
 
 #define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_hex_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
@@ -1526,22 +1595,22 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
 		   lpfc_##name##_show, lpfc_##name##_store)
 
 #define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_init(name, defval, minval, maxval)
 
 #define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
 
 #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1551,16 +1620,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
 		   lpfc_##name##_show, lpfc_##name##_store)
 
 #define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_hex_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
 
 #define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_hex_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
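
Note on the macro hunks above: the int-to-uint switch matters because module_param() type-checks its variable against the type token ("int" vs "uint"), so the storage, the parameter registration, and the generated _init/_set helpers all have to change together. As a rough illustration, here is approximately what a single invocation such as LPFC_ATTR_RW(poll_tmo, 10, 1, 255, "...") provides after this patch (simplified sketch, not literal preprocessor output):

	static uint lpfc_poll_tmo = 10;
	module_param(lpfc_poll_tmo, uint, 0);
	MODULE_PARM_DESC(lpfc_poll_tmo, "...");
	/* lpfc_param_show/init/set generate these helpers: */
	static ssize_t lpfc_poll_tmo_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf);
	static int lpfc_poll_tmo_init(struct lpfc_hba *phba, uint val);
	static int lpfc_poll_tmo_set(struct lpfc_hba *phba, uint val);
	static ssize_t lpfc_poll_tmo_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count);
	static DEVICE_ATTR(lpfc_poll_tmo, S_IRUGO | S_IWUSR,
			   lpfc_poll_tmo_show, lpfc_poll_tmo_store);
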
@@ -1581,7 +1650,8 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
-static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
+		lpfc_link_state_store);
 static DEVICE_ATTR(option_rom_version, S_IRUGO,
 		   lpfc_option_rom_version_show, NULL);
 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@@ -1589,6 +1659,7 @@ static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
 static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
+static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
 		   lpfc_board_mode_show, lpfc_board_mode_store);
 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
@@ -1863,6 +1934,17 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
 		 lpfc_enable_npiv_show, NULL);
 
 /*
+# lpfc_suppress_link_up: Bring link up at initialization
+#            0x0  = bring link up (issue MBX_INIT_LINK)
+#            0x1  = do NOT bring link up at initialization(MBX_INIT_LINK)
+#            0x2  = never bring up link
+# Default value is 0.
+*/
+LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
+		LPFC_DELAY_INIT_LINK_INDEFINITELY,
+		"Suppress Link Up at initialization");
+
+/*
 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
 # until the timer expires. Value range is [0,255]. Default value is 30.
 */
@@ -1887,8 +1969,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	int val = 0;
-	val = vport->cfg_devloss_tmo;
+
 	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
 }
 
@@ -2759,6 +2840,196 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
 		lpfc_link_speed_show, lpfc_link_speed_store);
 
 /*
+# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
+#       0  = aer disabled or not supported
+#       1  = aer supported and enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+
+/**
+ * lpfc_aer_support_store - Set the adapter for aer support
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string "selective".
+ * @count: unused variable.
+ *
+ * Description:
+ * If the val is 1 and currently the device's AER capability was not
+ * enabled, invoke the kernel's enable AER helper routine, trying to
+ * enable the device's AER capability. If the helper routine enabling
+ * AER returns success, update the device's cfg_aer_support flag to
+ * indicate AER is supported by the device; otherwise, if the device
+ * AER capability is already enabled to support AER, then do nothing.
+ *
+ * If the val is 0 and currently the device's AER support was enabled,
+ * invoke the kernel's disable AER helper routine. After that, update
+ * the device's cfg_aer_support flag to indicate AER is not supported
+ * by the device; otherwise, if the device AER capability is already
+ * disabled from supporting AER, then do nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int val = 0, rc = -EINVAL;
+
+	/* AER not supported on OC devices yet */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+		return -EPERM;
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	switch (val) {
+	case 0:
+		if (phba->hba_flag & HBA_AER_ENABLED) {
+			rc = pci_disable_pcie_error_reporting(phba->pcidev);
+			if (!rc) {
+				spin_lock_irq(&phba->hbalock);
+				phba->hba_flag &= ~HBA_AER_ENABLED;
+				spin_unlock_irq(&phba->hbalock);
+				phba->cfg_aer_support = 0;
+				rc = strlen(buf);
+			} else
+				rc = -EPERM;
+		} else {
+			phba->cfg_aer_support = 0;
+			rc = strlen(buf);
+		}
+		break;
+	case 1:
+		if (!(phba->hba_flag & HBA_AER_ENABLED)) {
+			rc = pci_enable_pcie_error_reporting(phba->pcidev);
+			if (!rc) {
+				spin_lock_irq(&phba->hbalock);
+				phba->hba_flag |= HBA_AER_ENABLED;
+				spin_unlock_irq(&phba->hbalock);
+				phba->cfg_aer_support = 1;
+				rc = strlen(buf);
+			} else
+				rc = -EPERM;
+		} else {
+			phba->cfg_aer_support = 1;
+			rc = strlen(buf);
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int lpfc_aer_support = 1;
+module_param(lpfc_aer_support, int, 1);
+MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
+lpfc_param_show(aer_support)
+
+/**
+ * lpfc_aer_support_init - Set the initial adapters aer support flag
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range [0,1], then set the adapter's initial
+ * cfg_aer_support field. It will be up to the driver's probe_one
+ * routine to determine whether the device's AER support can be set
+ * or not.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message, and
+ * choose the default value of setting AER support and return.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_aer_support_init(struct lpfc_hba *phba, int val)
+{
+	/* AER not supported on OC devices yet */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+		phba->cfg_aer_support = 0;
+		return -EPERM;
+	}
+
+	if (val == 0 || val == 1) {
+		phba->cfg_aer_support = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2712 lpfc_aer_support attribute value %d out "
+			"of range, allowed values are 0|1, setting it "
+			"to default value of 1\n", val);
+	/* By default, try to enable AER on a device */
+	phba->cfg_aer_support = 1;
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
+		   lpfc_aer_support_show, lpfc_aer_support_store);
+
+/**
+ * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string "selective".
+ * @count: unused variable.
+ *
+ * Description:
+ * If the @buf contains 1 and the device currently has the AER support
+ * enabled, then invokes the kernel AER helper routine
+ * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
+ * error status register.
+ *
+ * Notes:
+ *
+ * Returns:
+ * -EINVAL if the buf does not contain the 1 or the device is not currently
+ * enabled with the AER support.
+ **/
+static ssize_t
+lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int val, rc = -1;
+
+	/* AER not supported on OC devices yet */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+		return -EPERM;
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+	if (val != 1)
+		return -EINVAL;
+
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
+
+	if (rc == 0)
+		return strlen(buf);
+	else
+		return -EPERM;
+}
+
+static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
+		   lpfc_aer_cleanup_state);
+
+/*
 # lpfc_fcp_class: Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
 */
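
Note on the AER hunk above: lpfc_aer_support_init() only records the requested mode; per its own comment, the actual enable attempt belongs to the probe path. A plausible probe-time sketch under that reading (the placement in probe_one is assumed, not shown in this commit):

	/* Assumed probe-time usage: try to enable AER only when the module
	 * parameter asked for it, and keep hba_flag consistent with the
	 * outcome so the sysfs show/store handlers above agree. */
	if (phba->cfg_aer_support) {
		if (!pci_enable_pcie_error_reporting(phba->pcidev))
			phba->hba_flag |= HBA_AER_ENABLED;
		else
			phba->cfg_aer_support = 0; /* AER unavailable */
	}

At runtime the mode can then be flipped through the new lpfc_aer_support attribute, and writing 1 to lpfc_aer_state_cleanup clears the device's uncorrectable error status register.
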
@@ -2846,7 +3117,7 @@ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
 # identifies what rctl value to configure the additional ring for.
 # Value range is [1,0xff]. Default value is 4 (Unsolicated Data).
 */
-LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
+LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
 	     255, "Identifies RCTL for additional ring configuration");
 
 /*
@@ -2854,7 +3125,7 @@ LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
 # identifies what type value to configure the additional ring for.
 # Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
 */
-LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1,
+LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
 	     255, "Identifies TYPE for additional ring configuration");
 
 /*
@@ -2890,12 +3161,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 /*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 #		support this feature
-#       0  = MSI disabled (default)
+#       0  = MSI disabled
 #       1  = MSI enabled
-#       2  = MSI-X enabled
-# Value range is [0,2]. Default value is 0.
+#       2  = MSI-X enabled (default)
+# Value range is [0,2]. Default value is 2.
 */
-LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
 	    "MSI-X (2), if possible");
 
 /*
@@ -2947,15 +3218,6 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
 /*
-# lpfc_enable_fip: When set, FIP is required to start discovery. If not
-# set, the driver will add an FCF record manually if the port has no
-# FCF records available and start discovery.
-# Value range is [0,1]. Default value is 1 (enabled)
-*/
-LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
-
-
-/*
 # lpfc_prot_mask: i
 #	- Bit mask of host protection capabilities used to register with the
 #	  SCSI mid-layer
@@ -3013,6 +3275,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_num_discovered_ports,
 	&dev_attr_menlo_mgmt_mode,
 	&dev_attr_lpfc_drvr_version,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_temp_sensor,
 	&dev_attr_lpfc_log_verbose,
 	&dev_attr_lpfc_lun_queue_depth,
@@ -3020,7 +3283,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
-	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
 	&dev_attr_lpfc_ack0,
@@ -3061,6 +3323,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
 	&dev_attr_lpfc_prot_sg_seg_cnt,
+	&dev_attr_lpfc_aer_support,
+	&dev_attr_lpfc_aer_state_cleanup,
+	&dev_attr_lpfc_suppress_link_up,
 	NULL,
 };
 
@@ -3073,7 +3338,6 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_lun_queue_depth,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
-	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_hba_queue_depth,
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_restrict_login,
@@ -3147,7 +3411,7 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
 * sysfs_ctlreg_read - Read method for reading from ctlreg
 * @kobj: kernel kobject that contains the kernel class device.
 * @bin_attr: kernel attributes passed to us.
- * @buf: if succesful contains the data from the adapter IOREG space.
+ * @buf: if successful contains the data from the adapter IOREG space.
 * @off: offset into buffer to beginning of data.
 * @count: bytes to transfer.
 *
@@ -3815,7 +4079,11 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	hs->invalid_crc_count -= lso->invalid_crc_count;
 	hs->error_frames -= lso->error_frames;
 
-	if (phba->fc_topology == TOPOLOGY_LOOP) {
+	if (phba->hba_flag & HBA_FCOE_SUPPORT) {
+		hs->lip_count = -1;
+		hs->nos_count = (phba->link_events >> 1);
+		hs->nos_count -= lso->link_events;
+	} else if (phba->fc_topology == TOPOLOGY_LOOP) {
 		hs->lip_count = (phba->fc_eventTag >> 1);
 		hs->lip_count -= lso->link_events;
 		hs->nos_count = -1;
@@ -3906,7 +4174,10 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
 	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
 	lso->error_frames = pmb->un.varRdLnk.crcCnt;
-	lso->link_events = (phba->fc_eventTag >> 1);
+	if (phba->hba_flag & HBA_FCOE_SUPPORT)
+		lso->link_events = (phba->link_events >> 1);
+	else
+		lso->link_events = (phba->fc_eventTag >> 1);
 
 	psli->stats_start = get_seconds();
 
@@ -4222,15 +4493,18 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
-	phba->cfg_poll = lpfc_poll;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->cfg_poll = 0;
+	else
+		phba->cfg_poll = lpfc_poll;
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
-	lpfc_enable_fip_init(phba, lpfc_enable_fip);
 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
-
+	lpfc_aer_support_init(phba, lpfc_aer_support);
+	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
 	return;
 }
 
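
The lpfc_bsg.c diff that follows converts the synchronous CT send path (lpfc_sli_issue_iocb_wait plus a blocking wait) into an asynchronous one: the submitter allocates a bsg_job_data tracking object, attaches it to the iocb, and returns immediately, and the completion handler later locates the fc_bsg_job through that object and finishes it. Condensed from the code below (the names are the commit's own, heavily abbreviated):

	struct bsg_job_data *dd_data = kmalloc(sizeof(*dd_data), GFP_KERNEL);
	if (!dd_data)
		return -ENOMEM;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.set_job = job;	/* handler finds the job here */
	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0) == IOCB_SUCCESS)
		return 0;	/* job completes later via job->job_done(job) */
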
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index da6bf5aac9dd..d62b3e467926 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * Copyright (C) 2009-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -21,17 +21,21 @@
 #include <linux/interrupt.h>
 #include <linux/mempool.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_bsg_fc.h>
+#include <scsi/fc/fc_fs.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
 #include "lpfc_sli4.h"
 #include "lpfc_nl.h"
+#include "lpfc_bsg.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
 #include "lpfc.h"
@@ -40,14 +44,196 @@
 #include "lpfc_vport.h"
 #include "lpfc_version.h"
 
+struct lpfc_bsg_event {
+	struct list_head node;
+	struct kref kref;
+	wait_queue_head_t wq;
+
+	/* Event type and waiter identifiers */
+	uint32_t type_mask;
+	uint32_t req_id;
+	uint32_t reg_id;
+
+	/* next two flags are here for the auto-delete logic */
+	unsigned long wait_time_stamp;
+	int waiting;
+
+	/* seen and not seen events */
+	struct list_head events_to_get;
+	struct list_head events_to_see;
+
+	/* job waiting for this event to finish */
+	struct fc_bsg_job *set_job;
+};
+
+struct lpfc_bsg_iocb {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_nodelist *ndlp;
+
+	/* job waiting for this iocb to finish */
+	struct fc_bsg_job *set_job;
+};
+
+struct lpfc_bsg_mbox {
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *mb;
+
+	/* job waiting for this mbox command to finish */
+	struct fc_bsg_job *set_job;
+};
+
+#define MENLO_DID 0x0000FC0E
+
+struct lpfc_bsg_menlo {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_dmabuf *bmp;
+
+	/* job waiting for this iocb to finish */
+	struct fc_bsg_job *set_job;
+};
+
+#define TYPE_EVT	1
+#define TYPE_IOCB	2
+#define TYPE_MBOX	3
+#define TYPE_MENLO	4
+struct bsg_job_data {
+	uint32_t type;
+	union {
+		struct lpfc_bsg_event *evt;
+		struct lpfc_bsg_iocb iocb;
+		struct lpfc_bsg_mbox mbox;
+		struct lpfc_bsg_menlo menlo;
+	} context_un;
+};
+
+struct event_data {
+	struct list_head node;
+	uint32_t type;
+	uint32_t immed_dat;
+	void *data;
+	uint32_t len;
+};
+
+#define BUF_SZ_4K 4096
+#define SLI_CT_ELX_LOOPBACK 0x10
+
+enum ELX_LOOPBACK_CMD {
+	ELX_LOOPBACK_XRI_SETUP,
+	ELX_LOOPBACK_DATA,
+};
+
+#define ELX_LOOPBACK_HEADER_SZ \
+	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
+
+struct lpfc_dmabufext {
+	struct lpfc_dmabuf dma;
+	uint32_t size;
+	uint32_t flag;
+};
+
+/**
+ * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_bsg_send_mgmt_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	unsigned long iflags;
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_bsg_iocb *iocb;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	iocb = &dd_data->context_un.iocb;
+	job = iocb->set_job;
+	job->dd_data = NULL; /* so timeout handler does not reply */
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	bmp = iocb->bmp;
+	rspiocbq = iocb->rspiocbq;
+	rsp = &rspiocbq->iocb;
+	ndlp = iocb->ndlp;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+		} else
+			rc = -EACCES;
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(bmp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
 /**
- * lpfc_bsg_rport_ct - send a CT command from a bsg request
+ * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
  * @job: fc_bsg_job to handle
- */
+ **/
 static int
-lpfc_bsg_rport_ct(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 {
-	struct Scsi_Host *shost = job->shost;
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_rport_data *rdata = job->rport->dd_data;
@@ -64,57 +250,60 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
 	struct scatterlist *sgel = NULL;
 	int numbde;
 	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	uint32_t creg_val;
 	int rc = 0;
 
 	/* in case no data is transferred */
 	job->reply->reply_payload_rcv_len = 0;
 
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2733 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
 	if (!lpfc_nlp_get(ndlp)) {
-		job->reply->result = -ENODEV;
-		return 0;
+		rc = -ENODEV;
+		goto no_ndlp;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_ndlp;
 	}
 
 	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
 		rc = -ENODEV;
-		goto free_ndlp_exit;
+		goto free_bmp;
 	}
 
-	spin_lock_irq(shost->host_lock);
 	cmdiocbq = lpfc_sli_get_iocbq(phba);
 	if (!cmdiocbq) {
 		rc = -ENOMEM;
-		spin_unlock_irq(shost->host_lock);
-		goto free_ndlp_exit;
+		goto free_bmp;
 	}
-	cmd = &cmdiocbq->iocb;
 
+	cmd = &cmdiocbq->iocb;
 	rspiocbq = lpfc_sli_get_iocbq(phba);
 	if (!rspiocbq) {
 		rc = -ENOMEM;
 		goto free_cmdiocbq;
 	}
-	spin_unlock_irq(shost->host_lock);
 
 	rsp = &rspiocbq->iocb;
-
-	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-	if (!bmp) {
-		rc = -ENOMEM;
-		spin_lock_irq(shost->host_lock);
-		goto free_rspiocbq;
-	}
-
-	spin_lock_irq(shost->host_lock);
 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
 	if (!bmp->virt) {
 		rc = -ENOMEM;
-		goto free_bmp;
+		goto free_rspiocbq;
 	}
-	spin_unlock_irq(shost->host_lock);
 
 	INIT_LIST_HEAD(&bmp->list);
 	bpl = (struct ulp_bde64 *) bmp->virt;
-
 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
@@ -148,86 +337,160 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
 	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
 	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
 	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
-	cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
-	cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
 	cmd->ulpBdeCount = 1;
 	cmd->ulpLe = 1;
 	cmd->ulpClass = CLASS3;
 	cmd->ulpContext = ndlp->nlp_rpi;
 	cmd->ulpOwner = OWN_CHIP;
 	cmdiocbq->vport = phba->pport;
-	cmdiocbq->context1 = NULL;
-	cmdiocbq->context2 = NULL;
+	cmdiocbq->context3 = bmp;
 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
-
 	timeout = phba->fc_ratov * 2;
-	job->dd_data = cmdiocbq;
+	cmd->ulpTimeout = timeout;
 
-	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
-					timeout + LPFC_DRVR_TIMEOUT);
-
-	if (rc != IOCB_TIMEDOUT) {
-		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = rspiocbq;
+	dd_data->type = TYPE_IOCB;
+	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+	dd_data->context_un.iocb.rspiocbq = rspiocbq;
+	dd_data->context_un.iocb.set_job = job;
+	dd_data->context_un.iocb.bmp = bmp;
+	dd_data->context_un.iocb.ndlp = ndlp;
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		creg_val = readl(phba->HCregaddr);
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
 	}
 
-	if (rc == IOCB_TIMEDOUT) {
-		lpfc_sli_release_iocbq(phba, rspiocbq);
-		rc = -EACCES;
-		goto free_ndlp_exit;
-	}
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
 
-	if (rc != IOCB_SUCCESS) {
-		rc = -EACCES;
-		goto free_outdmp;
-	}
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
 
-	if (rsp->ulpStatus) {
-		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & 0xff) {
-			case IOERR_SEQUENCE_TIMEOUT:
-				rc = -ETIMEDOUT;
-				break;
-			case IOERR_INVALID_RPI:
-				rc = -EFAULT;
-				break;
-			default:
-				rc = -EACCES;
-				break;
-			}
-			goto free_outdmp;
-		}
-	} else
-		job->reply->reply_payload_rcv_len =
-			rsp->un.genreq64.bdl.bdeSize;
+	/* iocb failed so cleanup */
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-free_outdmp:
-	spin_lock_irq(shost->host_lock);
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-free_bmp:
-	kfree(bmp);
+
 free_rspiocbq:
212 lpfc_sli_release_iocbq(phba, rspiocbq); 384 lpfc_sli_release_iocbq(phba, rspiocbq);
213free_cmdiocbq: 385free_cmdiocbq:
214 lpfc_sli_release_iocbq(phba, cmdiocbq); 386 lpfc_sli_release_iocbq(phba, cmdiocbq);
215 spin_unlock_irq(shost->host_lock); 387free_bmp:
216free_ndlp_exit: 388 kfree(bmp);
389free_ndlp:
217 lpfc_nlp_put(ndlp); 390 lpfc_nlp_put(ndlp);
391no_ndlp:
392 kfree(dd_data);
393no_dd_data:
394 /* make error code available to userspace */
395 job->reply->result = rc;
396 job->dd_data = NULL;
397 return rc;
398}
218 399
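The routine above shows the deferred-completion model this patch moves to: instead of sleeping in lpfc_sli_issue_iocb_wait(), it stashes everything the completion handler will need in a bsg_job_data and returns 0 so the bsg layer keeps the job pending. A minimal sketch of that pattern follows; my_ctx, issue_async and hw_done are hypothetical names standing in for the driver's dd_data, send routine and iocb_cmpl callback, not driver API.

    #include <linux/slab.h>
    #include <scsi/scsi_transport_fc.h>

    /* Sketch only: the shape of the deferred bsg completion pattern. */
    struct my_ctx {
            struct fc_bsg_job *job;         /* completed later, not here */
    };

    static void hw_done(struct my_ctx *ctx, int status)
    {
            ctx->job->reply->result = status;   /* error code to userspace */
            ctx->job->job_done(ctx->job);       /* complete the pending job */
            kfree(ctx);                         /* drop the tracking state */
    }

    static int issue_async(struct fc_bsg_job *job,
                           int (*hw_issue)(struct my_ctx *))
    {
            struct my_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

            if (!ctx)
                    return -ENOMEM;
            ctx->job = job;
            if (hw_issue(ctx)) {            /* hand off to the hardware */
                    kfree(ctx);
                    return -EIO;            /* sync failure: caller reports it */
            }
            return 0;   /* "done for now": hw_done() finishes the job */
    }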
400/**
401 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
402 * @phba: Pointer to HBA context object.
403 * @cmdiocbq: Pointer to command iocb.
404 * @rspiocbq: Pointer to response iocb.
405 *
406 * This function is the completion handler for iocbs issued using
407 * lpfc_bsg_rport_els. This function is called by the
408 * ring event handler function without any lock held. This function
409 * can be called from both worker thread context and interrupt
410 * context. This function also can be called from other thread which
411 * cleans up the SLI layer objects.
412 * This function copies the contents of the response iocb into the
413 * bsg job's reply, translates any LS_RJT payload for userspace,
414 * and then completes the job back to the bsg layer; no thread
415 * sleeps on this completion path.
416 **/
417static void
418lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
419 struct lpfc_iocbq *cmdiocbq,
420 struct lpfc_iocbq *rspiocbq)
421{
422 struct bsg_job_data *dd_data;
423 struct fc_bsg_job *job;
424 IOCB_t *rsp;
425 struct lpfc_nodelist *ndlp;
426 struct lpfc_dmabuf *pbuflist = NULL;
427 struct fc_bsg_ctels_reply *els_reply;
428 uint8_t *rjt_data;
429 unsigned long flags;
430 int rc = 0;
431
432 spin_lock_irqsave(&phba->ct_ev_lock, flags);
433 dd_data = cmdiocbq->context1;
434 /* normal completion and timeout crossed paths, already done */
435 if (!dd_data) {
436 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
437 return;
438 }
439
440 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
441 if (cmdiocbq->context2 && rspiocbq)
442 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
443 &rspiocbq->iocb, sizeof(IOCB_t));
444
445 job = dd_data->context_un.iocb.set_job;
446 cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
447 rspiocbq = dd_data->context_un.iocb.rspiocbq;
448 rsp = &rspiocbq->iocb;
449 ndlp = dd_data->context_un.iocb.ndlp;
450
451 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
452 job->request_payload.sg_cnt, DMA_TO_DEVICE);
453 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
454 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
455
456 if (job->reply->result == -EAGAIN)
457 rc = -EAGAIN;
458 else if (rsp->ulpStatus == IOSTAT_SUCCESS)
459 job->reply->reply_payload_rcv_len =
460 rsp->un.elsreq64.bdl.bdeSize;
461 else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
462 job->reply->reply_payload_rcv_len =
463 sizeof(struct fc_bsg_ctels_reply);
464 /* LS_RJT data returned in word 4 */
465 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
466 els_reply = &job->reply->reply_data.ctels_reply;
467 els_reply->status = FC_CTELS_STATUS_REJECT;
468 els_reply->rjt_data.action = rjt_data[3];
469 els_reply->rjt_data.reason_code = rjt_data[2];
470 els_reply->rjt_data.reason_explanation = rjt_data[1];
471 els_reply->rjt_data.vendor_unique = rjt_data[0];
472 } else
473 rc = -EIO;
474
475 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
476 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
477 lpfc_sli_release_iocbq(phba, rspiocbq);
478 lpfc_sli_release_iocbq(phba, cmdiocbq);
479 lpfc_nlp_put(ndlp);
480 kfree(dd_data);
219 /* make error code available to userspace */ 481 /* make error code available to userspace */
220 job->reply->result = rc; 482 job->reply->result = rc;
483 job->dd_data = NULL;
221 /* complete the job back to userspace */ 484 /* complete the job back to userspace */
222 job->job_done(job); 485 job->job_done(job);
223 486 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
224 return 0; 487 return;
225} 488}
226 489
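The LS_RJT branch above unpacks four single-byte fields from the raw 32-bit ulpWord[4]; note the indices now run 3..0, the mirror image of the old synchronous path. On a little-endian host, indexing the bytes of the word that way is equivalent to shifting, as in this sketch (w, and the field names, are generic; this is not driver code):

    /* w holds rsp->un.ulpWord[4]; on a little-endian host byte 3 is the
     * word's most significant byte, matching rjt_data[3] above.
     */
    uint8_t action             = (w >> 24) & 0xff;      /* rjt_data[3] */
    uint8_t reason_code        = (w >> 16) & 0xff;      /* rjt_data[2] */
    uint8_t reason_explanation = (w >>  8) & 0xff;      /* rjt_data[1] */
    uint8_t vendor_unique      = (w >>  0) & 0xff;      /* rjt_data[0] */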
227/** 490/**
228 * lpfc_bsg_rport_els - send an ELS command from a bsg request 491 * lpfc_bsg_rport_els - send an ELS command from a bsg request
229 * @job: fc_bsg_job to handle 492 * @job: fc_bsg_job to handle
230 */ 493 **/
231static int 494static int
232lpfc_bsg_rport_els(struct fc_bsg_job *job) 495lpfc_bsg_rport_els(struct fc_bsg_job *job)
233{ 496{
@@ -235,7 +498,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
235 struct lpfc_hba *phba = vport->phba; 498 struct lpfc_hba *phba = vport->phba;
236 struct lpfc_rport_data *rdata = job->rport->dd_data; 499 struct lpfc_rport_data *rdata = job->rport->dd_data;
237 struct lpfc_nodelist *ndlp = rdata->pnode; 500 struct lpfc_nodelist *ndlp = rdata->pnode;
238
239 uint32_t elscmd; 501 uint32_t elscmd;
240 uint32_t cmdsize; 502 uint32_t cmdsize;
241 uint32_t rspsize; 503 uint32_t rspsize;
@@ -247,20 +509,30 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
247 struct lpfc_dmabuf *prsp; 509 struct lpfc_dmabuf *prsp;
248 struct lpfc_dmabuf *pbuflist = NULL; 510 struct lpfc_dmabuf *pbuflist = NULL;
249 struct ulp_bde64 *bpl; 511 struct ulp_bde64 *bpl;
250 int iocb_status;
251 int request_nseg; 512 int request_nseg;
252 int reply_nseg; 513 int reply_nseg;
253 struct scatterlist *sgel = NULL; 514 struct scatterlist *sgel = NULL;
254 int numbde; 515 int numbde;
255 dma_addr_t busaddr; 516 dma_addr_t busaddr;
517 struct bsg_job_data *dd_data;
518 uint32_t creg_val;
256 int rc = 0; 519 int rc = 0;
257 520
258 /* in case no data is transferred */ 521 /* in case no data is transferred */
259 job->reply->reply_payload_rcv_len = 0; 522 job->reply->reply_payload_rcv_len = 0;
260 523
524 /* allocate our bsg tracking structure */
525 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
526 if (!dd_data) {
527 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
528 "2735 Failed allocation of dd_data\n");
529 rc = -ENOMEM;
530 goto no_dd_data;
531 }
532
261 if (!lpfc_nlp_get(ndlp)) { 533 if (!lpfc_nlp_get(ndlp)) {
262 rc = -ENODEV; 534 rc = -ENODEV;
263 goto out; 535 goto free_dd_data;
264 } 536 }
265 537
266 elscmd = job->request->rqst_data.r_els.els_code; 538 elscmd = job->request->rqst_data.r_els.els_code;
@@ -270,24 +542,24 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
270 if (!rspiocbq) { 542 if (!rspiocbq) {
271 lpfc_nlp_put(ndlp); 543 lpfc_nlp_put(ndlp);
272 rc = -ENOMEM; 544 rc = -ENOMEM;
273 goto out; 545 goto free_dd_data;
274 } 546 }
275 547
276 rsp = &rspiocbq->iocb; 548 rsp = &rspiocbq->iocb;
277 rpi = ndlp->nlp_rpi; 549 rpi = ndlp->nlp_rpi;
278 550
279 cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp, 551 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
280 ndlp->nlp_DID, elscmd); 552 ndlp->nlp_DID, elscmd);
281
282 if (!cmdiocbq) { 553 if (!cmdiocbq) {
283 lpfc_sli_release_iocbq(phba, rspiocbq); 554 rc = -EIO;
284 return -EIO; 555 goto free_rspiocbq;
285 } 556 }
286 557
287 job->dd_data = cmdiocbq; 558 /* prep els iocb set context1 to the ndlp, context2 to the command
559 * dmabuf, context3 holds the data dmabuf
560 */
288 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; 561 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
289 prsp = (struct lpfc_dmabuf *) pcmd->list.next; 562 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
290
291 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 563 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
292 kfree(pcmd); 564 kfree(pcmd);
293 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 565 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
@@ -299,7 +571,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
299 571
300 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 572 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
301 job->request_payload.sg_cnt, DMA_TO_DEVICE); 573 job->request_payload.sg_cnt, DMA_TO_DEVICE);
302
303 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 574 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
304 busaddr = sg_dma_address(sgel); 575 busaddr = sg_dma_address(sgel);
305 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 576 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
@@ -321,7 +592,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
321 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 592 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
322 bpl++; 593 bpl++;
323 } 594 }
324
325 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = 595 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
326 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 596 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
327 cmdiocbq->iocb.ulpContext = rpi; 597 cmdiocbq->iocb.ulpContext = rpi;
@@ -329,102 +599,62 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
329 cmdiocbq->context1 = NULL; 599 cmdiocbq->context1 = NULL;
330 cmdiocbq->context2 = NULL; 600 cmdiocbq->context2 = NULL;
331 601
332 iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 602 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
333 rspiocbq, (phba->fc_ratov * 2) 603 cmdiocbq->context1 = dd_data;
334 + LPFC_DRVR_TIMEOUT); 604 cmdiocbq->context2 = rspiocbq;
335 605 dd_data->type = TYPE_IOCB;
336 /* release the new ndlp once the iocb completes */ 606 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
337 lpfc_nlp_put(ndlp); 607 dd_data->context_un.iocb.rspiocbq = rspiocbq;
338 if (iocb_status != IOCB_TIMEDOUT) { 608 dd_data->context_un.iocb.set_job = job;
 339 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 609 	dd_data->context_un.iocb.bmp = NULL;
340 job->request_payload.sg_cnt, DMA_TO_DEVICE); 610 dd_data->context_un.iocb.ndlp = ndlp;
341 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 611
342 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 612 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
613 creg_val = readl(phba->HCregaddr);
614 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
615 writel(creg_val, phba->HCregaddr);
616 readl(phba->HCregaddr); /* flush */
343 } 617 }
618 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
619 lpfc_nlp_put(ndlp);
620 if (rc == IOCB_SUCCESS)
621 return 0; /* done for now */
344 622
345 if (iocb_status == IOCB_SUCCESS) { 623 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
346 if (rsp->ulpStatus == IOSTAT_SUCCESS) { 624 job->request_payload.sg_cnt, DMA_TO_DEVICE);
347 job->reply->reply_payload_rcv_len = 625 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
348 rsp->un.elsreq64.bdl.bdeSize; 626 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
349 rc = 0;
350 } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
351 struct fc_bsg_ctels_reply *els_reply;
352 /* LS_RJT data returned in word 4 */
353 uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
354
355 els_reply = &job->reply->reply_data.ctels_reply;
356 job->reply->result = 0;
357 els_reply->status = FC_CTELS_STATUS_REJECT;
358 els_reply->rjt_data.action = rjt_data[0];
359 els_reply->rjt_data.reason_code = rjt_data[1];
360 els_reply->rjt_data.reason_explanation = rjt_data[2];
361 els_reply->rjt_data.vendor_unique = rjt_data[3];
362 } else
363 rc = -EIO;
364 } else
365 rc = -EIO;
366 627
367 if (iocb_status != IOCB_TIMEDOUT) 628 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
368 lpfc_els_free_iocb(phba, cmdiocbq); 629
630 lpfc_sli_release_iocbq(phba, cmdiocbq);
369 631
632free_rspiocbq:
370 lpfc_sli_release_iocbq(phba, rspiocbq); 633 lpfc_sli_release_iocbq(phba, rspiocbq);
371 634
372out: 635free_dd_data:
636 kfree(dd_data);
637
638no_dd_data:
373 /* make error code available to userspace */ 639 /* make error code available to userspace */
374 job->reply->result = rc; 640 job->reply->result = rc;
375 /* complete the job back to userspace */ 641 job->dd_data = NULL;
376 job->job_done(job); 642 return rc;
377
378 return 0;
379}
380
381struct lpfc_ct_event {
382 struct list_head node;
383 int ref;
384 wait_queue_head_t wq;
385
386 /* Event type and waiter identifiers */
387 uint32_t type_mask;
388 uint32_t req_id;
389 uint32_t reg_id;
390
391 /* next two flags are here for the auto-delete logic */
392 unsigned long wait_time_stamp;
393 int waiting;
394
395 /* seen and not seen events */
396 struct list_head events_to_get;
397 struct list_head events_to_see;
398};
399
400struct event_data {
401 struct list_head node;
402 uint32_t type;
403 uint32_t immed_dat;
404 void *data;
405 uint32_t len;
406};
407
408static struct lpfc_ct_event *
409lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
410{
411 struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
412 if (!evt)
413 return NULL;
414
415 INIT_LIST_HEAD(&evt->events_to_get);
416 INIT_LIST_HEAD(&evt->events_to_see);
417 evt->req_id = ev_req_id;
418 evt->reg_id = ev_reg_id;
419 evt->wait_time_stamp = jiffies;
420 init_waitqueue_head(&evt->wq);
421
422 return evt;
423} 643}
424 644
645/**
646 * lpfc_bsg_event_free - frees an allocated event structure
647 * @kref: Pointer to a kref.
648 *
649 * Called from kref_put. Back cast the kref into an event structure address.
650 * Free any events to get, delete associated nodes, free any events to see,
651 * free any data then free the event itself.
652 **/
425static void 653static void
426lpfc_ct_event_free(struct lpfc_ct_event *evt) 654lpfc_bsg_event_free(struct kref *kref)
427{ 655{
656 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
657 kref);
428 struct event_data *ed; 658 struct event_data *ed;
429 659
430 list_del(&evt->node); 660 list_del(&evt->node);
@@ -446,25 +676,82 @@ lpfc_ct_event_free(struct lpfc_ct_event *evt)
446 kfree(evt); 676 kfree(evt);
447} 677}
448 678
679/**
680 * lpfc_bsg_event_ref - increments the kref for an event
681 * @evt: Pointer to an event structure.
682 **/
449static inline void 683static inline void
450lpfc_ct_event_ref(struct lpfc_ct_event *evt) 684lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
451{ 685{
452 evt->ref++; 686 kref_get(&evt->kref);
453} 687}
454 688
689/**
690 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
691 * @evt: Pointer to an event structure.
692 **/
455static inline void 693static inline void
456lpfc_ct_event_unref(struct lpfc_ct_event *evt) 694lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
457{ 695{
458 if (--evt->ref < 0) 696 kref_put(&evt->kref, lpfc_bsg_event_free);
459 lpfc_ct_event_free(evt);
460} 697}
461 698
462#define SLI_CT_ELX_LOOPBACK 0x10 699/**
 700 * lpfc_bsg_event_new - allocate and initialize an event structure
701 * @ev_mask: Mask of events.
702 * @ev_reg_id: Event reg id.
703 * @ev_req_id: Event request id.
704 **/
705static struct lpfc_bsg_event *
706lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
707{
708 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
463 709
464enum ELX_LOOPBACK_CMD { 710 if (!evt)
465 ELX_LOOPBACK_XRI_SETUP, 711 return NULL;
466 ELX_LOOPBACK_DATA, 712
467}; 713 INIT_LIST_HEAD(&evt->events_to_get);
714 INIT_LIST_HEAD(&evt->events_to_see);
715 evt->type_mask = ev_mask;
716 evt->req_id = ev_req_id;
717 evt->reg_id = ev_reg_id;
718 evt->wait_time_stamp = jiffies;
719 init_waitqueue_head(&evt->wq);
720 kref_init(&evt->kref);
721 return evt;
722}
723
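The three helpers above replace the old hand-rolled evt->ref counter with a kref, so the last kref_put() frees the event through lpfc_bsg_event_free() instead of the fragile "if (--ref < 0)" test. The same pattern in isolation, with generic names (my_event and friends are not driver code):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_event {
            struct kref kref;
            /* ... payload ... */
    };

    static void my_event_free(struct kref *kref)
    {
            /* back-cast from the embedded kref to the containing object */
            struct my_event *ev = container_of(kref, struct my_event, kref);
            kfree(ev);
    }

    static struct my_event *my_event_new(void)
    {
            struct my_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
            if (ev)
                    kref_init(&ev->kref);   /* reference count starts at 1 */
            return ev;
    }

    /* every kref_get() by a holder is balanced by one kref_put() */
    static void my_event_drop(struct my_event *ev)
    {
            kref_put(&ev->kref, my_event_free);     /* frees on last put */
    }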
724/**
725 * diag_cmd_data_free - Frees an lpfc dma buffer extension
726 * @phba: Pointer to HBA context object.
727 * @mlist: Pointer to an lpfc dma buffer extension.
728 **/
729static int
730diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
731{
732 struct lpfc_dmabufext *mlast;
733 struct pci_dev *pcidev;
734 struct list_head head, *curr, *next;
735
736 if ((!mlist) || (!lpfc_is_link_up(phba) &&
737 (phba->link_flag & LS_LOOPBACK_MODE))) {
738 return 0;
739 }
740
741 pcidev = phba->pcidev;
742 list_add_tail(&head, &mlist->dma.list);
743
744 list_for_each_safe(curr, next, &head) {
745 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
746 if (mlast->dma.virt)
747 dma_free_coherent(&pcidev->dev,
748 mlast->size,
749 mlast->dma.virt,
750 mlast->dma.phys);
751 kfree(mlast);
752 }
753 return 0;
754}
468 755
469/** 756/**
470 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command 757 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
@@ -473,9 +760,9 @@ enum ELX_LOOPBACK_CMD {
 473 * @piocbq: 760 * @piocbq:
474 * 761 *
475 * This function is called when an unsolicited CT command is received. It 762 * This function is called when an unsolicited CT command is received. It
476 * forwards the event to any processes registerd to receive CT events. 763 * forwards the event to any processes registered to receive CT events.
477 */ 764 **/
478void 765int
479lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 766lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
480 struct lpfc_iocbq *piocbq) 767 struct lpfc_iocbq *piocbq)
481{ 768{
@@ -483,7 +770,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
483 uint32_t cmd; 770 uint32_t cmd;
484 uint32_t len; 771 uint32_t len;
485 struct lpfc_dmabuf *dmabuf = NULL; 772 struct lpfc_dmabuf *dmabuf = NULL;
486 struct lpfc_ct_event *evt; 773 struct lpfc_bsg_event *evt;
487 struct event_data *evt_dat = NULL; 774 struct event_data *evt_dat = NULL;
488 struct lpfc_iocbq *iocbq; 775 struct lpfc_iocbq *iocbq;
489 size_t offset = 0; 776 size_t offset = 0;
@@ -495,6 +782,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
495 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; 782 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
496 struct lpfc_hbq_entry *hbqe; 783 struct lpfc_hbq_entry *hbqe;
497 struct lpfc_sli_ct_request *ct_req; 784 struct lpfc_sli_ct_request *ct_req;
785 struct fc_bsg_job *job = NULL;
786 unsigned long flags;
787 int size = 0;
498 788
499 INIT_LIST_HEAD(&head); 789 INIT_LIST_HEAD(&head);
500 list_add_tail(&head, &piocbq->list); 790 list_add_tail(&head, &piocbq->list);
@@ -503,6 +793,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
503 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) 793 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
504 goto error_ct_unsol_exit; 794 goto error_ct_unsol_exit;
505 795
796 if (phba->link_state == LPFC_HBA_ERROR ||
797 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
798 goto error_ct_unsol_exit;
799
506 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 800 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
507 dmabuf = bdeBuf1; 801 dmabuf = bdeBuf1;
508 else { 802 else {
@@ -510,7 +804,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
510 piocbq->iocb.un.cont64[0].addrLow); 804 piocbq->iocb.un.cont64[0].addrLow);
511 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); 805 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
512 } 806 }
513 807 if (dmabuf == NULL)
808 goto error_ct_unsol_exit;
514 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; 809 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
515 evt_req_id = ct_req->FsType; 810 evt_req_id = ct_req->FsType;
516 cmd = ct_req->CommandResponse.bits.CmdRsp; 811 cmd = ct_req->CommandResponse.bits.CmdRsp;
@@ -518,24 +813,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
518 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 813 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
519 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); 814 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
520 815
521 mutex_lock(&phba->ct_event_mutex); 816 spin_lock_irqsave(&phba->ct_ev_lock, flags);
522 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 817 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
523 if (evt->req_id != evt_req_id) 818 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
819 evt->req_id != evt_req_id)
524 continue; 820 continue;
525 821
526 lpfc_ct_event_ref(evt); 822 lpfc_bsg_event_ref(evt);
527 823 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
528 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); 824 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
529 if (!evt_dat) { 825 if (evt_dat == NULL) {
530 lpfc_ct_event_unref(evt); 826 spin_lock_irqsave(&phba->ct_ev_lock, flags);
827 lpfc_bsg_event_unref(evt);
531 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 828 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
532 "2614 Memory allocation failed for " 829 "2614 Memory allocation failed for "
533 "CT event\n"); 830 "CT event\n");
534 break; 831 break;
535 } 832 }
536 833
537 mutex_unlock(&phba->ct_event_mutex);
538
539 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 834 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
540 /* take accumulated byte count from the last iocbq */ 835 /* take accumulated byte count from the last iocbq */
541 iocbq = list_entry(head.prev, typeof(*iocbq), list); 836 iocbq = list_entry(head.prev, typeof(*iocbq), list);
@@ -549,25 +844,25 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
549 } 844 }
550 845
551 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); 846 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
552 if (!evt_dat->data) { 847 if (evt_dat->data == NULL) {
553 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 848 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
554 "2615 Memory allocation failed for " 849 "2615 Memory allocation failed for "
555 "CT event data, size %d\n", 850 "CT event data, size %d\n",
556 evt_dat->len); 851 evt_dat->len);
557 kfree(evt_dat); 852 kfree(evt_dat);
558 mutex_lock(&phba->ct_event_mutex); 853 spin_lock_irqsave(&phba->ct_ev_lock, flags);
559 lpfc_ct_event_unref(evt); 854 lpfc_bsg_event_unref(evt);
560 mutex_unlock(&phba->ct_event_mutex); 855 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
561 goto error_ct_unsol_exit; 856 goto error_ct_unsol_exit;
562 } 857 }
563 858
564 list_for_each_entry(iocbq, &head, list) { 859 list_for_each_entry(iocbq, &head, list) {
860 size = 0;
565 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 861 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
566 bdeBuf1 = iocbq->context2; 862 bdeBuf1 = iocbq->context2;
567 bdeBuf2 = iocbq->context3; 863 bdeBuf2 = iocbq->context3;
568 } 864 }
569 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { 865 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
570 int size = 0;
571 if (phba->sli3_options & 866 if (phba->sli3_options &
572 LPFC_SLI3_HBQ_ENABLED) { 867 LPFC_SLI3_HBQ_ENABLED) {
573 if (i == 0) { 868 if (i == 0) {
@@ -600,9 +895,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
600 iocbq); 895 iocbq);
601 kfree(evt_dat->data); 896 kfree(evt_dat->data);
602 kfree(evt_dat); 897 kfree(evt_dat);
603 mutex_lock(&phba->ct_event_mutex); 898 spin_lock_irqsave(&phba->ct_ev_lock,
604 lpfc_ct_event_unref(evt); 899 flags);
605 mutex_unlock(&phba->ct_event_mutex); 900 lpfc_bsg_event_unref(evt);
901 spin_unlock_irqrestore(
902 &phba->ct_ev_lock, flags);
606 goto error_ct_unsol_exit; 903 goto error_ct_unsol_exit;
607 } 904 }
608 memcpy((char *)(evt_dat->data) + offset, 905 memcpy((char *)(evt_dat->data) + offset,
@@ -615,15 +912,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
615 dmabuf); 912 dmabuf);
616 } else { 913 } else {
617 switch (cmd) { 914 switch (cmd) {
915 case ELX_LOOPBACK_DATA:
916 diag_cmd_data_free(phba,
917 (struct lpfc_dmabufext *)
918 dmabuf);
919 break;
618 case ELX_LOOPBACK_XRI_SETUP: 920 case ELX_LOOPBACK_XRI_SETUP:
619 if (!(phba->sli3_options & 921 if ((phba->sli_rev ==
620 LPFC_SLI3_HBQ_ENABLED)) 922 LPFC_SLI_REV2) ||
923 (phba->sli3_options &
924 LPFC_SLI3_HBQ_ENABLED
925 )) {
926 lpfc_in_buf_free(phba,
927 dmabuf);
928 } else {
621 lpfc_post_buffer(phba, 929 lpfc_post_buffer(phba,
622 pring, 930 pring,
623 1); 931 1);
624 else 932 }
625 lpfc_in_buf_free(phba,
626 dmabuf);
627 break; 933 break;
628 default: 934 default:
629 if (!(phba->sli3_options & 935 if (!(phba->sli3_options &
@@ -637,7 +943,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
637 } 943 }
638 } 944 }
639 945
640 mutex_lock(&phba->ct_event_mutex); 946 spin_lock_irqsave(&phba->ct_ev_lock, flags);
641 if (phba->sli_rev == LPFC_SLI_REV4) { 947 if (phba->sli_rev == LPFC_SLI_REV4) {
642 evt_dat->immed_dat = phba->ctx_idx; 948 evt_dat->immed_dat = phba->ctx_idx;
643 phba->ctx_idx = (phba->ctx_idx + 1) % 64; 949 phba->ctx_idx = (phba->ctx_idx + 1) % 64;
@@ -650,122 +956,144 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
650 956
651 evt_dat->type = FC_REG_CT_EVENT; 957 evt_dat->type = FC_REG_CT_EVENT;
652 list_add(&evt_dat->node, &evt->events_to_see); 958 list_add(&evt_dat->node, &evt->events_to_see);
653 wake_up_interruptible(&evt->wq); 959 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
654 lpfc_ct_event_unref(evt); 960 wake_up_interruptible(&evt->wq);
655 if (evt_req_id == SLI_CT_ELX_LOOPBACK) 961 lpfc_bsg_event_unref(evt);
656 break; 962 break;
963 }
964
965 list_move(evt->events_to_see.prev, &evt->events_to_get);
966 lpfc_bsg_event_unref(evt);
967
968 job = evt->set_job;
969 evt->set_job = NULL;
970 if (job) {
971 job->reply->reply_payload_rcv_len = size;
972 /* make error code available to userspace */
973 job->reply->result = 0;
974 job->dd_data = NULL;
975 /* complete the job back to userspace */
976 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
977 job->job_done(job);
978 spin_lock_irqsave(&phba->ct_ev_lock, flags);
979 }
657 } 980 }
658 mutex_unlock(&phba->ct_event_mutex); 981 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
659 982
660error_ct_unsol_exit: 983error_ct_unsol_exit:
661 if (!list_empty(&head)) 984 if (!list_empty(&head))
662 list_del(&head); 985 list_del(&head);
663 986 if (evt_req_id == SLI_CT_ELX_LOOPBACK)
664 return; 987 return 0;
988 return 1;
665} 989}
666 990
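lpfc_bsg_ct_unsol_event keeps two lists per waiter: new events land on events_to_see, and, depending on the consumer, the handler either wakes a thread sleeping on evt->wq (the loopback self-test) or moves the event straight to events_to_get and completes the registered bsg job. Note too that ct_ev_lock is dropped around job->job_done() and retaken, since the completion callback must not run under the spinlock. A condensed sketch of the producer-side handoff, where is_sync_waiter is a hypothetical stand-in for the SLI_CT_ELX_LOOPBACK test:

    /* under ct_ev_lock: queue the new event, then hand it off */
    list_add(&evt_dat->node, &evt->events_to_see);
    if (is_sync_waiter) {
            wake_up_interruptible(&evt->wq); /* waiter harvests the list */
    } else {
            /* newest event becomes readable by a later GET_EVENT call */
            list_move(evt->events_to_see.prev, &evt->events_to_get);
    }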
667/** 991/**
668 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command 992 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
669 * @job: SET_EVENT fc_bsg_job 993 * @job: SET_EVENT fc_bsg_job
670 */ 994 **/
671static int 995static int
672lpfc_bsg_set_event(struct fc_bsg_job *job) 996lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
673{ 997{
674 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 998 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
675 struct lpfc_hba *phba = vport->phba; 999 struct lpfc_hba *phba = vport->phba;
676 struct set_ct_event *event_req; 1000 struct set_ct_event *event_req;
677 struct lpfc_ct_event *evt; 1001 struct lpfc_bsg_event *evt;
678 int rc = 0; 1002 int rc = 0;
1003 struct bsg_job_data *dd_data = NULL;
1004 uint32_t ev_mask;
1005 unsigned long flags;
679 1006
680 if (job->request_len < 1007 if (job->request_len <
681 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { 1008 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
682 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1009 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
683 "2612 Received SET_CT_EVENT below minimum " 1010 "2612 Received SET_CT_EVENT below minimum "
684 "size\n"); 1011 "size\n");
685 return -EINVAL; 1012 rc = -EINVAL;
1013 goto job_error;
1014 }
1015
1016 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1017 if (dd_data == NULL) {
1018 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1019 "2734 Failed allocation of dd_data\n");
1020 rc = -ENOMEM;
1021 goto job_error;
686 } 1022 }
687 1023
688 event_req = (struct set_ct_event *) 1024 event_req = (struct set_ct_event *)
689 job->request->rqst_data.h_vendor.vendor_cmd; 1025 job->request->rqst_data.h_vendor.vendor_cmd;
690 1026 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
691 mutex_lock(&phba->ct_event_mutex); 1027 FC_REG_EVENT_MASK);
1028 spin_lock_irqsave(&phba->ct_ev_lock, flags);
692 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1029 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
693 if (evt->reg_id == event_req->ev_reg_id) { 1030 if (evt->reg_id == event_req->ev_reg_id) {
694 lpfc_ct_event_ref(evt); 1031 lpfc_bsg_event_ref(evt);
695 evt->wait_time_stamp = jiffies; 1032 evt->wait_time_stamp = jiffies;
696 break; 1033 break;
697 } 1034 }
698 } 1035 }
699 mutex_unlock(&phba->ct_event_mutex); 1036 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
700 1037
701 if (&evt->node == &phba->ct_ev_waiters) { 1038 if (&evt->node == &phba->ct_ev_waiters) {
702 /* no event waiting struct yet - first call */ 1039 /* no event waiting struct yet - first call */
703 evt = lpfc_ct_event_new(event_req->ev_reg_id, 1040 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
704 event_req->ev_req_id); 1041 event_req->ev_req_id);
705 if (!evt) { 1042 if (!evt) {
706 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1043 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
707 "2617 Failed allocation of event " 1044 "2617 Failed allocation of event "
708 "waiter\n"); 1045 "waiter\n");
709 return -ENOMEM; 1046 rc = -ENOMEM;
1047 goto job_error;
710 } 1048 }
711 1049
712 mutex_lock(&phba->ct_event_mutex); 1050 spin_lock_irqsave(&phba->ct_ev_lock, flags);
713 list_add(&evt->node, &phba->ct_ev_waiters); 1051 list_add(&evt->node, &phba->ct_ev_waiters);
714 lpfc_ct_event_ref(evt); 1052 lpfc_bsg_event_ref(evt);
715 mutex_unlock(&phba->ct_event_mutex); 1053 evt->wait_time_stamp = jiffies;
1054 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
716 } 1055 }
717 1056
1057 spin_lock_irqsave(&phba->ct_ev_lock, flags);
718 evt->waiting = 1; 1058 evt->waiting = 1;
719 if (wait_event_interruptible(evt->wq, 1059 dd_data->type = TYPE_EVT;
720 !list_empty(&evt->events_to_see))) { 1060 dd_data->context_un.evt = evt;
721 mutex_lock(&phba->ct_event_mutex); 1061 evt->set_job = job; /* for unsolicited command */
722 lpfc_ct_event_unref(evt); /* release ref */ 1062 job->dd_data = dd_data; /* for fc transport timeout callback*/
723 lpfc_ct_event_unref(evt); /* delete */ 1063 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
724 mutex_unlock(&phba->ct_event_mutex); 1064 return 0; /* call job done later */
725 rc = -EINTR; 1065
726 goto set_event_out; 1066job_error:
727 } 1067 if (dd_data != NULL)
728 1068 kfree(dd_data);
729 evt->wait_time_stamp = jiffies; 1069
730 evt->waiting = 0; 1070 job->dd_data = NULL;
731 1071 return rc;
732 mutex_lock(&phba->ct_event_mutex);
733 list_move(evt->events_to_see.prev, &evt->events_to_get);
734 lpfc_ct_event_unref(evt); /* release ref */
735 mutex_unlock(&phba->ct_event_mutex);
736
737set_event_out:
738 /* set_event carries no reply payload */
739 job->reply->reply_payload_rcv_len = 0;
740 /* make error code available to userspace */
741 job->reply->result = rc;
742 /* complete the job back to userspace */
743 job->job_done(job);
744
745 return 0;
746} 1072}
747 1073
748/** 1074/**
749 * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command 1075 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
750 * @job: GET_EVENT fc_bsg_job 1076 * @job: GET_EVENT fc_bsg_job
751 */ 1077 **/
752static int 1078static int
753lpfc_bsg_get_event(struct fc_bsg_job *job) 1079lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
754{ 1080{
755 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 1081 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
756 struct lpfc_hba *phba = vport->phba; 1082 struct lpfc_hba *phba = vport->phba;
757 struct get_ct_event *event_req; 1083 struct get_ct_event *event_req;
758 struct get_ct_event_reply *event_reply; 1084 struct get_ct_event_reply *event_reply;
759 struct lpfc_ct_event *evt; 1085 struct lpfc_bsg_event *evt;
760 struct event_data *evt_dat = NULL; 1086 struct event_data *evt_dat = NULL;
761 int rc = 0; 1087 unsigned long flags;
1088 uint32_t rc = 0;
762 1089
763 if (job->request_len < 1090 if (job->request_len <
764 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { 1091 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
765 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1092 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
766 "2613 Received GET_CT_EVENT request below " 1093 "2613 Received GET_CT_EVENT request below "
767 "minimum size\n"); 1094 "minimum size\n");
768 return -EINVAL; 1095 rc = -EINVAL;
1096 goto job_error;
769 } 1097 }
770 1098
771 event_req = (struct get_ct_event *) 1099 event_req = (struct get_ct_event *)
@@ -773,13 +1101,12 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
773 1101
774 event_reply = (struct get_ct_event_reply *) 1102 event_reply = (struct get_ct_event_reply *)
775 job->reply->reply_data.vendor_reply.vendor_rsp; 1103 job->reply->reply_data.vendor_reply.vendor_rsp;
776 1104 spin_lock_irqsave(&phba->ct_ev_lock, flags);
777 mutex_lock(&phba->ct_event_mutex);
778 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1105 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
779 if (evt->reg_id == event_req->ev_reg_id) { 1106 if (evt->reg_id == event_req->ev_reg_id) {
780 if (list_empty(&evt->events_to_get)) 1107 if (list_empty(&evt->events_to_get))
781 break; 1108 break;
782 lpfc_ct_event_ref(evt); 1109 lpfc_bsg_event_ref(evt);
783 evt->wait_time_stamp = jiffies; 1110 evt->wait_time_stamp = jiffies;
784 evt_dat = list_entry(evt->events_to_get.prev, 1111 evt_dat = list_entry(evt->events_to_get.prev,
785 struct event_data, node); 1112 struct event_data, node);
@@ -787,84 +1114,1904 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
787 break; 1114 break;
788 } 1115 }
789 } 1116 }
790 mutex_unlock(&phba->ct_event_mutex); 1117 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
791 1118
792 if (!evt_dat) { 1119 /* The app may continue to ask for event data until it gets
 1120 	 * an error indicating that there isn't any more
1121 */
1122 if (evt_dat == NULL) {
793 job->reply->reply_payload_rcv_len = 0; 1123 job->reply->reply_payload_rcv_len = 0;
794 rc = -ENOENT; 1124 rc = -ENOENT;
795 goto error_get_event_exit; 1125 goto job_error;
796 } 1126 }
797 1127
798 if (evt_dat->len > job->reply_payload.payload_len) { 1128 if (evt_dat->len > job->request_payload.payload_len) {
799 evt_dat->len = job->reply_payload.payload_len; 1129 evt_dat->len = job->request_payload.payload_len;
800 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1130 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
801 "2618 Truncated event data at %d " 1131 "2618 Truncated event data at %d "
802 "bytes\n", 1132 "bytes\n",
803 job->reply_payload.payload_len); 1133 job->request_payload.payload_len);
804 } 1134 }
805 1135
1136 event_reply->type = evt_dat->type;
806 event_reply->immed_data = evt_dat->immed_dat; 1137 event_reply->immed_data = evt_dat->immed_dat;
807
808 if (evt_dat->len > 0) 1138 if (evt_dat->len > 0)
809 job->reply->reply_payload_rcv_len = 1139 job->reply->reply_payload_rcv_len =
810 sg_copy_from_buffer(job->reply_payload.sg_list, 1140 sg_copy_from_buffer(job->request_payload.sg_list,
811 job->reply_payload.sg_cnt, 1141 job->request_payload.sg_cnt,
812 evt_dat->data, evt_dat->len); 1142 evt_dat->data, evt_dat->len);
813 else 1143 else
814 job->reply->reply_payload_rcv_len = 0; 1144 job->reply->reply_payload_rcv_len = 0;
815 rc = 0;
816 1145
817 if (evt_dat) 1146 if (evt_dat) {
818 kfree(evt_dat->data); 1147 kfree(evt_dat->data);
819 kfree(evt_dat); 1148 kfree(evt_dat);
820 mutex_lock(&phba->ct_event_mutex); 1149 }
821 lpfc_ct_event_unref(evt); 1150
822 mutex_unlock(&phba->ct_event_mutex); 1151 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1152 lpfc_bsg_event_unref(evt);
1153 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1154 job->dd_data = NULL;
1155 job->reply->result = 0;
1156 job->job_done(job);
1157 return 0;
1158
1159job_error:
1160 job->dd_data = NULL;
1161 job->reply->result = rc;
1162 return rc;
1163}
1164
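The copy to userspace above goes through sg_copy_from_buffer(), which walks the job's scatterlist and memcpys out of a linear kernel buffer, returning the number of bytes actually copied. A minimal usage sketch; fill_reply is a hypothetical wrapper, not a driver helper:

    #include <linux/scatterlist.h>

    /* Copy a linear kernel buffer into a scatter/gather list; the return
     * value is bounded by both len and the capacity of the sg list.
     */
    static size_t fill_reply(struct scatterlist *sgl, unsigned int nents,
                             void *buf, size_t len)
    {
            return sg_copy_from_buffer(sgl, nents, buf, len);
    }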
1165/**
1166 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1167 * @phba: Pointer to HBA context object.
1168 * @cmdiocbq: Pointer to command iocb.
1169 * @rspiocbq: Pointer to response iocb.
1170 *
1171 * This function is the completion handler for iocbs issued using
1172 * lpfc_issue_ct_rsp_cmp function. This function is called by the
1173 * ring event handler function without any lock held. This function
1174 * can be called from both worker thread context and interrupt
1175 * context. This function also can be called from other thread which
1176 * cleans up the SLI layer objects.
 1177 * This function copies the completion status of the response iocb
 1178 * into the bsg job's reply, releases the iocb and dma resources,
 1179 * and then completes the job back to the bsg layer; no thread
 1180 * sleeps on this completion path.
1181 **/
1182static void
1183lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1184 struct lpfc_iocbq *cmdiocbq,
1185 struct lpfc_iocbq *rspiocbq)
1186{
1187 struct bsg_job_data *dd_data;
1188 struct fc_bsg_job *job;
1189 IOCB_t *rsp;
1190 struct lpfc_dmabuf *bmp;
1191 struct lpfc_nodelist *ndlp;
1192 unsigned long flags;
1193 int rc = 0;
823 1194
824error_get_event_exit: 1195 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1196 dd_data = cmdiocbq->context1;
1197 /* normal completion and timeout crossed paths, already done */
1198 if (!dd_data) {
1199 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1200 return;
1201 }
1202
1203 job = dd_data->context_un.iocb.set_job;
1204 bmp = dd_data->context_un.iocb.bmp;
1205 rsp = &rspiocbq->iocb;
1206 ndlp = dd_data->context_un.iocb.ndlp;
1207
1208 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1209 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1210
1211 if (rsp->ulpStatus) {
1212 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1213 switch (rsp->un.ulpWord[4] & 0xff) {
1214 case IOERR_SEQUENCE_TIMEOUT:
1215 rc = -ETIMEDOUT;
1216 break;
1217 case IOERR_INVALID_RPI:
1218 rc = -EFAULT;
1219 break;
1220 default:
1221 rc = -EACCES;
1222 break;
1223 }
1224 } else
1225 rc = -EACCES;
1226 } else
1227 job->reply->reply_payload_rcv_len =
1228 rsp->un.genreq64.bdl.bdeSize;
1229
1230 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1231 lpfc_sli_release_iocbq(phba, cmdiocbq);
1232 lpfc_nlp_put(ndlp);
1233 kfree(bmp);
1234 kfree(dd_data);
825 /* make error code available to userspace */ 1235 /* make error code available to userspace */
826 job->reply->result = rc; 1236 job->reply->result = rc;
1237 job->dd_data = NULL;
827 /* complete the job back to userspace */ 1238 /* complete the job back to userspace */
828 job->job_done(job); 1239 job->job_done(job);
1240 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1241 return;
1242}
829 1243
1244/**
1245 * lpfc_issue_ct_rsp - issue a ct response
1246 * @phba: Pointer to HBA context object.
1247 * @job: Pointer to the job object.
 1248 * @tag: tag index value into the port's context exchange array.
 1249 * @bmp: Pointer to a dma buffer descriptor.
 1250 * @num_entry: Number of entries in the bde.
1251 **/
1252static int
1253lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1254 struct lpfc_dmabuf *bmp, int num_entry)
1255{
1256 IOCB_t *icmd;
1257 struct lpfc_iocbq *ctiocb = NULL;
1258 int rc = 0;
1259 struct lpfc_nodelist *ndlp = NULL;
1260 struct bsg_job_data *dd_data;
1261 uint32_t creg_val;
1262
1263 /* allocate our bsg tracking structure */
1264 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1265 if (!dd_data) {
1266 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1267 "2736 Failed allocation of dd_data\n");
1268 rc = -ENOMEM;
1269 goto no_dd_data;
1270 }
1271
1272 /* Allocate buffer for command iocb */
1273 ctiocb = lpfc_sli_get_iocbq(phba);
1274 if (!ctiocb) {
1275 rc = ENOMEM;
1276 goto no_ctiocb;
1277 }
1278
1279 icmd = &ctiocb->iocb;
1280 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1281 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1282 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1283 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1284 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1285 icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1286 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1287 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1288 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1289
1290 /* Fill in rest of iocb */
1291 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1292 icmd->ulpBdeCount = 1;
1293 icmd->ulpLe = 1;
1294 icmd->ulpClass = CLASS3;
1295 if (phba->sli_rev == LPFC_SLI_REV4) {
1296 /* Do not issue unsol response if oxid not marked as valid */
1297 if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
1298 rc = IOCB_ERROR;
1299 goto issue_ct_rsp_exit;
1300 }
1301 icmd->ulpContext = phba->ct_ctx[tag].oxid;
1302 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1303 if (!ndlp) {
1304 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1305 "2721 ndlp null for oxid %x SID %x\n",
1306 icmd->ulpContext,
1307 phba->ct_ctx[tag].SID);
1308 rc = IOCB_ERROR;
1309 goto issue_ct_rsp_exit;
1310 }
1311 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1312 /* The exchange is done, mark the entry as invalid */
1313 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1314 } else
1315 icmd->ulpContext = (ushort) tag;
1316
1317 icmd->ulpTimeout = phba->fc_ratov * 2;
1318
1319 /* Xmit CT response on exchange <xid> */
1320 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1321 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
1322 icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
1323
1324 ctiocb->iocb_cmpl = NULL;
1325 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1326 ctiocb->vport = phba->pport;
1327 ctiocb->context3 = bmp;
1328
1329 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1330 ctiocb->context1 = dd_data;
1331 ctiocb->context2 = NULL;
1332 dd_data->type = TYPE_IOCB;
1333 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1334 dd_data->context_un.iocb.rspiocbq = NULL;
1335 dd_data->context_un.iocb.set_job = job;
1336 dd_data->context_un.iocb.bmp = bmp;
1337 dd_data->context_un.iocb.ndlp = ndlp;
1338
1339 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1340 creg_val = readl(phba->HCregaddr);
1341 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1342 writel(creg_val, phba->HCregaddr);
1343 readl(phba->HCregaddr); /* flush */
1344 }
1345
1346 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1347
1348 if (rc == IOCB_SUCCESS)
1349 return 0; /* done for now */
1350
1351issue_ct_rsp_exit:
1352 lpfc_sli_release_iocbq(phba, ctiocb);
1353no_ctiocb:
1354 kfree(dd_data);
1355no_dd_data:
830 return rc; 1356 return rc;
831} 1357}
832 1358
833/** 1359/**
1360 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1361 * @job: SEND_MGMT_RESP fc_bsg_job
1362 **/
1363static int
1364lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1365{
1366 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1367 struct lpfc_hba *phba = vport->phba;
1368 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1369 job->request->rqst_data.h_vendor.vendor_cmd;
1370 struct ulp_bde64 *bpl;
1371 struct lpfc_dmabuf *bmp = NULL;
1372 struct scatterlist *sgel = NULL;
1373 int request_nseg;
1374 int numbde;
1375 dma_addr_t busaddr;
1376 uint32_t tag = mgmt_resp->tag;
1377 unsigned long reqbfrcnt =
1378 (unsigned long)job->request_payload.payload_len;
1379 int rc = 0;
1380
1381 /* in case no data is transferred */
1382 job->reply->reply_payload_rcv_len = 0;
1383
1384 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1385 rc = -ERANGE;
1386 goto send_mgmt_rsp_exit;
1387 }
1388
1389 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1390 if (!bmp) {
1391 rc = -ENOMEM;
1392 goto send_mgmt_rsp_exit;
1393 }
1394
1395 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1396 if (!bmp->virt) {
1397 rc = -ENOMEM;
1398 goto send_mgmt_rsp_free_bmp;
1399 }
1400
1401 INIT_LIST_HEAD(&bmp->list);
1402 bpl = (struct ulp_bde64 *) bmp->virt;
1403 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1404 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1405 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1406 busaddr = sg_dma_address(sgel);
1407 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1408 bpl->tus.f.bdeSize = sg_dma_len(sgel);
1409 bpl->tus.w = cpu_to_le32(bpl->tus.w);
1410 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1411 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1412 bpl++;
1413 }
1414
1415 rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1416
1417 if (rc == IOCB_SUCCESS)
1418 return 0; /* done for now */
1419
1420 /* TBD need to handle a timeout */
1421 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1422 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1423 rc = -EACCES;
1424 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1425
1426send_mgmt_rsp_free_bmp:
1427 kfree(bmp);
1428send_mgmt_rsp_exit:
1429 /* make error code available to userspace */
1430 job->reply->result = rc;
1431 job->dd_data = NULL;
1432 return rc;
1433}
1434
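The same map-and-fill loop recurs in every request path in this file: pci_map_sg() the bsg payload, then write one ulp_bde64 per DMA segment into the buffer list. Condensed into a helper for illustration; fill_bdes is hypothetical, the driver open-codes this each time:

    /* Sketch: map a bsg payload and emit one 64-bit BDE per DMA segment.
     * Returns the number of segments mapped (= BDEs written). dir is
     * DMA_TO_DEVICE or DMA_FROM_DEVICE, as in the driver.
     */
    static int fill_bdes(struct lpfc_hba *phba, struct fc_bsg_buffer *payload,
                         struct ulp_bde64 *bpl, int dir)
    {
            struct scatterlist *sgel;
            dma_addr_t busaddr;
            int nseg, i;

            nseg = pci_map_sg(phba->pcidev, payload->sg_list,
                              payload->sg_cnt, dir);
            for_each_sg(payload->sg_list, sgel, nseg, i) {
                    busaddr = sg_dma_address(sgel);         /* bus address */
                    bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                    bpl->tus.f.bdeSize = sg_dma_len(sgel);  /* segment length */
                    bpl->tus.w = cpu_to_le32(bpl->tus.w);   /* HBA is little-endian */
                    bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
                    bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
                    bpl++;                                  /* next BDE slot */
            }
            return nseg;
    }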
1435/**
1436 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
1437 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1438 *
1439 * This function is responsible for placing a port into diagnostic loopback
1440 * mode in order to perform a diagnostic loopback test.
 1441 * All new scsi requests are blocked, a small delay is used to allow the
 1442 * in-flight scsi requests to complete, and then the link is brought down.
 1443 * Once the link is placed in loopback mode, scsi requests are allowed
 1444 * again so the scsi mid-layer doesn't give up on the port.
 1445 * All of this is done in-line.
 1446 **/
1447static int
1448lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1449{
1450 struct Scsi_Host *shost = job->shost;
1451 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1452 struct lpfc_hba *phba = vport->phba;
1453 struct diag_mode_set *loopback_mode;
1454 struct lpfc_sli *psli = &phba->sli;
1455 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1456 uint32_t link_flags;
1457 uint32_t timeout;
1458 struct lpfc_vport **vports;
1459 LPFC_MBOXQ_t *pmboxq;
1460 int mbxstatus;
1461 int i = 0;
1462 int rc = 0;
1463
1464 /* no data to return just the return code */
1465 job->reply->reply_payload_rcv_len = 0;
1466
1467 if (job->request_len <
1468 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
1469 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1470 "2738 Received DIAG MODE request below minimum "
1471 "size\n");
1472 rc = -EINVAL;
1473 goto job_error;
1474 }
1475
1476 loopback_mode = (struct diag_mode_set *)
1477 job->request->rqst_data.h_vendor.vendor_cmd;
1478 link_flags = loopback_mode->type;
1479 timeout = loopback_mode->timeout;
1480
1481 if ((phba->link_state == LPFC_HBA_ERROR) ||
1482 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1483 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
1484 rc = -EACCES;
1485 goto job_error;
1486 }
1487
1488 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1489 if (!pmboxq) {
1490 rc = -ENOMEM;
1491 goto job_error;
1492 }
1493
1494 vports = lpfc_create_vport_work_array(phba);
1495 if (vports) {
1496 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1497 shost = lpfc_shost_from_vport(vports[i]);
1498 scsi_block_requests(shost);
1499 }
1500
1501 lpfc_destroy_vport_work_array(phba, vports);
1502 } else {
1503 shost = lpfc_shost_from_vport(phba->pport);
1504 scsi_block_requests(shost);
1505 }
1506
1507 while (pring->txcmplq_cnt) {
1508 if (i++ > 500) /* wait up to 5 seconds */
1509 break;
1510
1511 msleep(10);
1512 }
1513
1514 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1515 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1516 pmboxq->u.mb.mbxOwner = OWN_HOST;
1517
1518 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1519
1520 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1521 /* wait for link down before proceeding */
1522 i = 0;
1523 while (phba->link_state != LPFC_LINK_DOWN) {
1524 if (i++ > timeout) {
1525 rc = -ETIMEDOUT;
1526 goto loopback_mode_exit;
1527 }
1528
1529 msleep(10);
1530 }
1531
1532 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1533 if (link_flags == INTERNAL_LOOP_BACK)
1534 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1535 else
1536 pmboxq->u.mb.un.varInitLnk.link_flags =
1537 FLAGS_TOPOLOGY_MODE_LOOP;
1538
1539 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1540 pmboxq->u.mb.mbxOwner = OWN_HOST;
1541
1542 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1543 LPFC_MBOX_TMO);
1544
1545 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1546 rc = -ENODEV;
1547 else {
1548 phba->link_flag |= LS_LOOPBACK_MODE;
1549 /* wait for the link attention interrupt */
1550 msleep(100);
1551
1552 i = 0;
1553 while (phba->link_state != LPFC_HBA_READY) {
1554 if (i++ > timeout) {
1555 rc = -ETIMEDOUT;
1556 break;
1557 }
1558
1559 msleep(10);
1560 }
1561 }
1562
1563 } else
1564 rc = -ENODEV;
1565
1566loopback_mode_exit:
1567 vports = lpfc_create_vport_work_array(phba);
1568 if (vports) {
1569 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1570 shost = lpfc_shost_from_vport(vports[i]);
1571 scsi_unblock_requests(shost);
1572 }
1573 lpfc_destroy_vport_work_array(phba, vports);
1574 } else {
1575 shost = lpfc_shost_from_vport(phba->pport);
1576 scsi_unblock_requests(shost);
1577 }
1578
1579 /*
1580 * Let SLI layer release mboxq if mbox command completed after timeout.
1581 */
1582 if (mbxstatus != MBX_TIMEOUT)
1583 mempool_free(pmboxq, phba->mbox_mem_pool);
1584
1585job_error:
1586 /* make error code available to userspace */
1587 job->reply->result = rc;
1588 /* complete the job back to userspace if no error */
1589 if (rc == 0)
1590 job->job_done(job);
1591 return rc;
1592}
1593
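Rather than using a waitqueue, lpfc_bsg_diag_mode polls phba->link_state with a bounded 10ms sleep loop while waiting for the link to drop and come back in loopback. The idiom factored out as a sketch; wait_for_state, wanted and max_iter are hypothetical names:

    #include <linux/delay.h>

    /* Poll for a link state with a 10ms period and an iteration bound;
     * returns 0 on success, -ETIMEDOUT when the bound is exhausted.
     */
    static int wait_for_state(struct lpfc_hba *phba, int wanted, int max_iter)
    {
            int i = 0;

            while (phba->link_state != wanted) {
                    if (i++ > max_iter)
                            return -ETIMEDOUT;
                    msleep(10);     /* sleep, never busy-wait here */
            }
            return 0;
    }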
1594/**
1595 * lpfcdiag_loop_self_reg - obtains a remote port login id
1596 * @phba: Pointer to HBA context object
1597 * @rpi: Pointer to a remote port login id
1598 *
1599 * This function obtains a remote port login id so the diag loopback test
1600 * can send and receive its own unsolicited CT command.
1601 **/
 1602static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
1603{
1604 LPFC_MBOXQ_t *mbox;
1605 struct lpfc_dmabuf *dmabuff;
1606 int status;
1607
1608 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1609 if (!mbox)
1610 return ENOMEM;
1611
1612 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
1613 (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
1614 if (status) {
1615 mempool_free(mbox, phba->mbox_mem_pool);
1616 return ENOMEM;
1617 }
1618
1619 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
1620 mbox->context1 = NULL;
1621 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1622
1623 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1624 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1625 kfree(dmabuff);
1626 if (status != MBX_TIMEOUT)
1627 mempool_free(mbox, phba->mbox_mem_pool);
1628 return ENODEV;
1629 }
1630
1631 *rpi = mbox->u.mb.un.varWords[0];
1632
1633 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1634 kfree(dmabuff);
1635 mempool_free(mbox, phba->mbox_mem_pool);
1636 return 0;
1637}
1638
1639/**
1640 * lpfcdiag_loop_self_unreg - unregs from the rpi
1641 * @phba: Pointer to HBA context object
1642 * @rpi: Remote port login id
1643 *
1644 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
1645 **/
1646static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1647{
1648 LPFC_MBOXQ_t *mbox;
1649 int status;
1650
1651 /* Allocate mboxq structure */
1652 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1653 if (mbox == NULL)
1654 return ENOMEM;
1655
1656 lpfc_unreg_login(phba, 0, rpi, mbox);
1657 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1658
1659 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1660 if (status != MBX_TIMEOUT)
1661 mempool_free(mbox, phba->mbox_mem_pool);
1662 return EIO;
1663 }
1664
1665 mempool_free(mbox, phba->mbox_mem_pool);
1666 return 0;
1667}
1668
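Both self-registration helpers follow the driver's synchronous mailbox rule: after lpfc_sli_issue_mbox_wait(), the mboxq may be freed only when the status is not MBX_TIMEOUT, because on a timeout the SLI layer still owns it and frees it when the late completion eventually arrives. A skeleton of that pattern; issue_mbox_sync is hypothetical and prep() stands in for the caller's command fill (lpfc_reg_rpi, lpfc_unreg_login above):

    /* Sketch of the synchronous mailbox pattern used above. */
    static int issue_mbox_sync(struct lpfc_hba *phba,
                               void (*prep)(struct lpfc_hba *, LPFC_MBOXQ_t *))
    {
            LPFC_MBOXQ_t *mbox;
            int status;

            mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
            if (!mbox)
                    return -ENOMEM;
            prep(phba, mbox);       /* fill in the actual command */

            status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
            if (status != MBX_SUCCESS || mbox->u.mb.mbxStatus) {
                    /* on MBX_TIMEOUT the SLI layer frees mbox later */
                    if (status != MBX_TIMEOUT)
                            mempool_free(mbox, phba->mbox_mem_pool);
                    return -EIO;
            }
            mempool_free(mbox, phba->mbox_mem_pool);
            return 0;
    }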
1669/**
1670 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
1671 * @phba: Pointer to HBA context object
1672 * @rpi: Remote port login id
1673 * @txxri: Pointer to transmit exchange id
 1674 * @rxxri: Pointer to response exchange id
1675 *
1676 * This function obtains the transmit and receive ids required to send
1677 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 1678 * flags are used so the unsolicited response handler is able to process
1679 * the ct command sent on the same port.
1680 **/
1681static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1682 uint16_t *txxri, uint16_t * rxxri)
1683{
1684 struct lpfc_bsg_event *evt;
1685 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
1686 IOCB_t *cmd, *rsp;
1687 struct lpfc_dmabuf *dmabuf;
1688 struct ulp_bde64 *bpl = NULL;
1689 struct lpfc_sli_ct_request *ctreq = NULL;
1690 int ret_val = 0;
1691 unsigned long flags;
1692
1693 *txxri = 0;
1694 *rxxri = 0;
1695 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
1696 SLI_CT_ELX_LOOPBACK);
1697 if (!evt)
1698 return ENOMEM;
1699
1700 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1701 list_add(&evt->node, &phba->ct_ev_waiters);
1702 lpfc_bsg_event_ref(evt);
1703 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1704
1705 cmdiocbq = lpfc_sli_get_iocbq(phba);
1706 rspiocbq = lpfc_sli_get_iocbq(phba);
1707
1708 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1709 if (dmabuf) {
1710 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
1711 INIT_LIST_HEAD(&dmabuf->list);
1712 bpl = (struct ulp_bde64 *) dmabuf->virt;
1713 memset(bpl, 0, sizeof(*bpl));
1714 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
1715 bpl->addrHigh =
1716 le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
1717 bpl->addrLow =
1718 le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
1719 bpl->tus.f.bdeFlags = 0;
1720 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
1721 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1722 }
1723
1724 if (cmdiocbq == NULL || rspiocbq == NULL ||
1725 dmabuf == NULL || bpl == NULL || ctreq == NULL) {
1726 ret_val = ENOMEM;
1727 goto err_get_xri_exit;
1728 }
1729
1730 cmd = &cmdiocbq->iocb;
1731 rsp = &rspiocbq->iocb;
1732
1733 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
1734
1735 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
1736 ctreq->RevisionId.bits.InId = 0;
1737 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
1738 ctreq->FsSubType = 0;
1739 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
1740 ctreq->CommandResponse.bits.Size = 0;
1741
1742
1743 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
1744 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
1745 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1746 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
1747
1748 cmd->un.xseq64.w5.hcsw.Fctl = LA;
1749 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
1750 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
1751 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1752
1753 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
1754 cmd->ulpBdeCount = 1;
1755 cmd->ulpLe = 1;
1756 cmd->ulpClass = CLASS3;
1757 cmd->ulpContext = rpi;
1758
1759 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
1760 cmdiocbq->vport = phba->pport;
1761
1762 ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
1763 rspiocbq,
1764 (phba->fc_ratov * 2)
1765 + LPFC_DRVR_TIMEOUT);
1766 if (ret_val)
1767 goto err_get_xri_exit;
1768
1769 *txxri = rsp->ulpContext;
1770
1771 evt->waiting = 1;
1772 evt->wait_time_stamp = jiffies;
1773 ret_val = wait_event_interruptible_timeout(
1774 evt->wq, !list_empty(&evt->events_to_see),
1775 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
1776 if (list_empty(&evt->events_to_see))
1777 ret_val = (ret_val) ? EINTR : ETIMEDOUT;
1778 else {
1779 ret_val = IOCB_SUCCESS;
1780 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1781 list_move(evt->events_to_see.prev, &evt->events_to_get);
1782 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1783 *rxxri = (list_entry(evt->events_to_get.prev,
1784 typeof(struct event_data),
1785 node))->immed_dat;
1786 }
1787 evt->waiting = 0;
1788
1789err_get_xri_exit:
1790 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1791 lpfc_bsg_event_unref(evt); /* release ref */
1792 lpfc_bsg_event_unref(evt); /* delete */
1793 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1794
1795 if (dmabuf) {
1796 if (dmabuf->virt)
1797 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1798 kfree(dmabuf);
1799 }
1800
1801 if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
1802 lpfc_sli_release_iocbq(phba, cmdiocbq);
1803 if (rspiocbq)
1804 lpfc_sli_release_iocbq(phba, rspiocbq);
1805 return ret_val;
1806}
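
The tail of lpfcdiag_loop_get_xri() shows the event-wait idiom this file repeats: wait_event_interruptible_timeout() returns a negative value when a signal interrupts the sleep and zero when the timer expires, which the driver folds into EINTR or ETIMEDOUT. A minimal sketch of that mapping as a hypothetical helper (the driver itself open-codes it at each call site):

	/* Hypothetical helper; not part of the driver. */
	static int lpfc_bsg_wait_ct_event(struct lpfc_bsg_event *evt,
					  unsigned long timeout)
	{
		long rc;

		evt->waiting = 1;
		rc = wait_event_interruptible_timeout(evt->wq,
					!list_empty(&evt->events_to_see),
					timeout);
		evt->waiting = 0;
		if (!list_empty(&evt->events_to_see))
			return 0;		/* event arrived in time */
		/* negative rc means a signal woke us; zero means timeout */
		return rc ? EINTR : ETIMEDOUT;	/* positive errno, local style */
	}
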
1807
1808/**
1809 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1810 * @phba: Pointer to HBA context object
1811 * @bpl: Pointer to 64 bit bde structure
1812 * @size: Number of bytes to process
1813 * @nocopydata: Flag to suppress copying user data into the allocated buffers
1814 *
1815 * This function allocates page size buffers, chains them through an
1816 * lpfc_dmabufext and describes each one with a 64 bit bde at @bpl. When
1817 * @nocopydata is clear the buffers are zeroed so user data can later be copied in.
1818 **/
1819static struct lpfc_dmabufext *
1820diag_cmd_data_alloc(struct lpfc_hba *phba,
1821 struct ulp_bde64 *bpl, uint32_t size,
1822 int nocopydata)
1823{
1824 struct lpfc_dmabufext *mlist = NULL;
1825 struct lpfc_dmabufext *dmp;
1826 int cnt, offset = 0, i = 0;
1827 struct pci_dev *pcidev;
1828
1829 pcidev = phba->pcidev;
1830
1831 while (size) {
1832 /* We get chunks of 4K */
1833 if (size > BUF_SZ_4K)
1834 cnt = BUF_SZ_4K;
1835 else
1836 cnt = size;
1837
1838 /* allocate struct lpfc_dmabufext buffer header */
1839 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
1840 if (!dmp)
1841 goto out;
1842
1843 INIT_LIST_HEAD(&dmp->dma.list);
1844
1845 /* Queue it to a linked list */
1846 if (mlist)
1847 list_add_tail(&dmp->dma.list, &mlist->dma.list);
1848 else
1849 mlist = dmp;
1850
1851 /* allocate buffer */
1852 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
1853 cnt,
1854 &(dmp->dma.phys),
1855 GFP_KERNEL);
1856
1857 if (!dmp->dma.virt)
1858 goto out;
1859
1860 dmp->size = cnt;
1861
1862 if (nocopydata) {
1863 bpl->tus.f.bdeFlags = 0;
1864 pci_dma_sync_single_for_device(phba->pcidev,
1865 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1866
1867 } else {
1868 memset((uint8_t *)dmp->dma.virt, 0, cnt);
1869 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1870 }
1871
1872 /* build buffer ptr list for IOCB */
1873 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
1874 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
1875 bpl->tus.f.bdeSize = (ushort) cnt;
1876 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1877 bpl++;
1878
1879 i++;
1880 offset += cnt;
1881 size -= cnt;
1882 }
1883
1884 mlist->flag = i;
1885 return mlist;
1886out:
1887 diag_cmd_data_free(phba, mlist);
1888 return NULL;
1889}
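
The complementary teardown, diag_cmd_data_free() (defined earlier in this file and used by the error path above), walks the chained list and releases each coherent buffer. A sketch of its shape, assuming the list was built by diag_cmd_data_alloc(); see the real definition for the extra loopback-state guard it applies:

	/* Sketch only; the real diag_cmd_data_free() lives earlier in this file. */
	static void diag_cmd_data_free_sketch(struct lpfc_hba *phba,
					      struct lpfc_dmabufext *mlist)
	{
		struct lpfc_dmabufext *mlast;
		struct list_head head, *curr, *next;

		if (!mlist)
			return;

		list_add_tail(&head, &mlist->dma.list);
		list_for_each_safe(curr, next, &head) {
			mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
			if (mlast->dma.virt)
				dma_free_coherent(&phba->pcidev->dev, mlast->size,
						  mlast->dma.virt, mlast->dma.phys);
			kfree(mlast);
		}
	}
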
1890
1891/**
1892 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
1893 * @phba: Pointer to HBA context object
1894 * @rxxri: Receive exchange id
1895 * @len: Number of data bytes
1896 *
1897 * This function allocates and posts a data buffer of sufficient size to receive
1898 * an unsolicited CT command.
1899 **/
1900static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1901 size_t len)
1902{
1903 struct lpfc_sli *psli = &phba->sli;
1904 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1905 struct lpfc_iocbq *cmdiocbq;
1906 IOCB_t *cmd = NULL;
1907 struct list_head head, *curr, *next;
1908 struct lpfc_dmabuf *rxbmp;
1909 struct lpfc_dmabuf *dmp;
1910 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
1911 struct ulp_bde64 *rxbpl = NULL;
1912 uint32_t num_bde;
1913 struct lpfc_dmabufext *rxbuffer = NULL;
1914 int ret_val = 0;
1915 int i = 0;
1916
1917 cmdiocbq = lpfc_sli_get_iocbq(phba);
1918 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1919 if (rxbmp != NULL) {
1920 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
1921 INIT_LIST_HEAD(&rxbmp->list);
1922 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
1923 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
1924 }
1925
1926 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
1927 ret_val = ENOMEM;
1928 goto err_post_rxbufs_exit;
1929 }
1930
1931 /* Queue buffers for the receive exchange */
1932 num_bde = (uint32_t)rxbuffer->flag;
1933 dmp = &rxbuffer->dma;
1934
1935 cmd = &cmdiocbq->iocb;
1936 i = 0;
1937
1938 INIT_LIST_HEAD(&head);
1939 list_add_tail(&head, &dmp->list);
1940 list_for_each_safe(curr, next, &head) {
1941 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
1942 list_del(curr);
1943
1944 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
1945 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
1946 cmd->un.quexri64cx.buff.bde.addrHigh =
1947 putPaddrHigh(mp[i]->phys);
1948 cmd->un.quexri64cx.buff.bde.addrLow =
1949 putPaddrLow(mp[i]->phys);
1950 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
1951 ((struct lpfc_dmabufext *)mp[i])->size;
1952 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
1953 cmd->ulpCommand = CMD_QUE_XRI64_CX;
1954 cmd->ulpPU = 0;
1955 cmd->ulpLe = 1;
1956 cmd->ulpBdeCount = 1;
1957 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
1958
1959 } else {
1960 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
1961 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
1962 cmd->un.cont64[i].tus.f.bdeSize =
1963 ((struct lpfc_dmabufext *)mp[i])->size;
1964 cmd->ulpBdeCount = ++i;
1965
1966 if ((--num_bde > 0) && (i < 2))
1967 continue;
1968
1969 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
1970 cmd->ulpLe = 1;
1971 }
1972
1973 cmd->ulpClass = CLASS3;
1974 cmd->ulpContext = rxxri;
1975
1976 ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
1977
1978 if (ret_val == IOCB_ERROR) {
1979 diag_cmd_data_free(phba,
1980 (struct lpfc_dmabufext *)mp[0]);
1981 if (mp[1])
1982 diag_cmd_data_free(phba,
1983 (struct lpfc_dmabufext *)mp[1]);
1984 dmp = list_entry(next, struct lpfc_dmabuf, list);
1985 ret_val = EIO;
1986 goto err_post_rxbufs_exit;
1987 }
1988
1989 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
1990 if (mp[1]) {
1991 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
1992 mp[1] = NULL;
1993 }
1994
1995 /* The iocb was freed by lpfc_sli_issue_iocb */
1996 cmdiocbq = lpfc_sli_get_iocbq(phba);
1997 if (!cmdiocbq) {
1998 dmp = list_entry(next, struct lpfc_dmabuf, list);
1999 ret_val = EIO;
2000 goto err_post_rxbufs_exit;
2001 }
2002
2003 cmd = &cmdiocbq->iocb;
2004 i = 0;
2005 }
2006 list_del(&head);
2007
2008err_post_rxbufs_exit:
2009
2010 if (rxbmp) {
2011 if (rxbmp->virt)
2012 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2013 kfree(rxbmp);
2014 }
2015
2016 if (cmdiocbq)
2017 lpfc_sli_release_iocbq(phba, cmdiocbq);
2018 return ret_val;
2019}
2020
2021/**
2022 * lpfc_bsg_diag_test - with a port in loopback mode, issue a CT cmd to itself
2023 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2024 *
2025 * This function receives a user data buffer to be transmitted and received on
2026 * the same port; the link must be up and in loopback mode before this
2027 * function is called.
2028 * 1. A kernel buffer is allocated to copy the user data into.
2029 * 2. The port registers with "itself".
2030 * 3. The transmit and receive exchange ids are obtained.
2031 * 4. The receive exchange id is posted.
2032 * 5. A new els loopback event is created.
2033 * 6. The command and response iocbs are allocated.
2034 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2035 *
2036 * This function may be called repeatedly while the port is in loopback mode,
2037 * so it is the app's responsibility to issue a reset to take the port out
2038 * of loopback mode.
2039 **/
2040static int
2041lpfc_bsg_diag_test(struct fc_bsg_job *job)
2042{
2043 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2044 struct lpfc_hba *phba = vport->phba;
2045 struct diag_mode_test *diag_mode;
2046 struct lpfc_bsg_event *evt;
2047 struct event_data *evdat;
2048 struct lpfc_sli *psli = &phba->sli;
2049 uint32_t size;
2050 uint32_t full_size;
2051 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2052 uint16_t rpi;
2053 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2054 IOCB_t *cmd, *rsp;
2055 struct lpfc_sli_ct_request *ctreq;
2056 struct lpfc_dmabuf *txbmp;
2057 struct ulp_bde64 *txbpl = NULL;
2058 struct lpfc_dmabufext *txbuffer = NULL;
2059 struct list_head head;
2060 struct lpfc_dmabuf *curr;
2061 uint16_t txxri, rxxri;
2062 uint32_t num_bde;
2063 uint8_t *ptr = NULL, *rx_databuf = NULL;
2064 int rc = 0;
2065 unsigned long flags;
2066 void *dataout = NULL;
2067 uint32_t total_mem;
2068
2069 /* in case no data is returned return just the return code */
2070 job->reply->reply_payload_rcv_len = 0;
2071
2072 if (job->request_len <
2073 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2074 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2075 "2739 Received DIAG TEST request below minimum "
2076 "size\n");
2077 rc = -EINVAL;
2078 goto loopback_test_exit;
2079 }
2080
2081 if (job->request_payload.payload_len !=
2082 job->reply_payload.payload_len) {
2083 rc = -EINVAL;
2084 goto loopback_test_exit;
2085 }
2086
2087 diag_mode = (struct diag_mode_test *)
2088 job->request->rqst_data.h_vendor.vendor_cmd;
2089
2090 if ((phba->link_state == LPFC_HBA_ERROR) ||
2091 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2092 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2093 rc = -EACCES;
2094 goto loopback_test_exit;
2095 }
2096
2097 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2098 rc = -EACCES;
2099 goto loopback_test_exit;
2100 }
2101
2102 size = job->request_payload.payload_len;
2103 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2104
2105 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2106 rc = -ERANGE;
2107 goto loopback_test_exit;
2108 }
2109
2110 if (size >= BUF_SZ_4K) {
2111 /*
2112 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2113 * then we allocate 64k and re-use that buffer over and over to
2114 * xfer the whole block. This is because the Linux kernel cannot
2115 * reliably allocate more than 120k of contiguous kernel memory. This
2116 * was seen in practice with GET_FCPTARGETMAPPING...
2117 */
2118 if (size <= (64 * 1024))
2119 total_mem = size;
2120 else
2121 total_mem = 64 * 1024;
2122 } else
2123 /* Allocate memory for ioctl data */
2124 total_mem = BUF_SZ_4K;
2125
2126 dataout = kmalloc(total_mem, GFP_KERNEL);
2127 if (dataout == NULL) {
2128 rc = -ENOMEM;
2129 goto loopback_test_exit;
2130 }
2131
2132 ptr = dataout;
2133 ptr += ELX_LOOPBACK_HEADER_SZ;
2134 sg_copy_to_buffer(job->request_payload.sg_list,
2135 job->request_payload.sg_cnt,
2136 ptr, size);
2137
2138 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2139 if (rc) {
2140 rc = -ENOMEM;
2141 goto loopback_test_exit;
2142 }
2143
2144 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2145 if (rc) {
2146 lpfcdiag_loop_self_unreg(phba, rpi);
2147 rc = -ENOMEM;
2148 goto loopback_test_exit;
2149 }
2150
2151 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2152 if (rc) {
2153 lpfcdiag_loop_self_unreg(phba, rpi);
2154 rc = -ENOMEM;
2155 goto loopback_test_exit;
2156 }
2157
2158 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2159 SLI_CT_ELX_LOOPBACK);
2160 if (!evt) {
2161 lpfcdiag_loop_self_unreg(phba, rpi);
2162 rc = -ENOMEM;
2163 goto loopback_test_exit;
2164 }
2165
2166 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2167 list_add(&evt->node, &phba->ct_ev_waiters);
2168 lpfc_bsg_event_ref(evt);
2169 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2170
2171 cmdiocbq = lpfc_sli_get_iocbq(phba);
2172 rspiocbq = lpfc_sli_get_iocbq(phba);
2173 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2174
2175 if (txbmp) {
2176 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2177 INIT_LIST_HEAD(&txbmp->list);
2178 txbpl = (struct ulp_bde64 *) txbmp->virt;
2179 if (txbpl)
2180 txbuffer = diag_cmd_data_alloc(phba,
2181 txbpl, full_size, 0);
2182 }
2183
2184 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
2185 rc = -ENOMEM;
2186 goto err_loopback_test_exit;
2187 }
2188
2189 cmd = &cmdiocbq->iocb;
2190 rsp = &rspiocbq->iocb;
2191
2192 INIT_LIST_HEAD(&head);
2193 list_add_tail(&head, &txbuffer->dma.list);
2194 list_for_each_entry(curr, &head, list) {
2195 segment_len = ((struct lpfc_dmabufext *)curr)->size;
2196 if (current_offset == 0) {
2197 ctreq = curr->virt;
2198 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2199 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2200 ctreq->RevisionId.bits.InId = 0;
2201 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2202 ctreq->FsSubType = 0;
2203 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
2204 ctreq->CommandResponse.bits.Size = size;
2205 segment_offset = ELX_LOOPBACK_HEADER_SZ;
2206 } else
2207 segment_offset = 0;
2208
2209 BUG_ON(segment_offset >= segment_len);
2210 memcpy(curr->virt + segment_offset,
2211 ptr + current_offset,
2212 segment_len - segment_offset);
2213
2214 current_offset += segment_len - segment_offset;
2215 BUG_ON(current_offset > size);
2216 }
2217 list_del(&head);
2218
2219 /* Build the XMIT_SEQUENCE iocb */
2220
2221 num_bde = (uint32_t)txbuffer->flag;
2222
2223 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
2224 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
2225 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2226 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
2227
2228 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
2229 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2230 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2231 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2232
2233 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
2234 cmd->ulpBdeCount = 1;
2235 cmd->ulpLe = 1;
2236 cmd->ulpClass = CLASS3;
2237 cmd->ulpContext = txxri;
2238
2239 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2240 cmdiocbq->vport = phba->pport;
2241
2242 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
2243 (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
2244
2245 if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
2246 rc = -EIO;
2247 goto err_loopback_test_exit;
2248 }
2249
2250 evt->waiting = 1;
2251 rc = wait_event_interruptible_timeout(
2252 evt->wq, !list_empty(&evt->events_to_see),
2253 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2254 evt->waiting = 0;
2255 if (list_empty(&evt->events_to_see))
2256 rc = (rc) ? -EINTR : -ETIMEDOUT;
2257 else {
2258 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2259 list_move(evt->events_to_see.prev, &evt->events_to_get);
2260 evdat = list_entry(evt->events_to_get.prev,
2261 typeof(*evdat), node);
2262 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2263 rx_databuf = evdat->data;
2264 if (evdat->len != full_size) {
2265 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2266 "1603 Loopback test did not receive expected "
2267 "data length. actual length 0x%x expected "
2268 "length 0x%x\n",
2269 evdat->len, full_size);
2270 rc = -EIO;
2271 } else if (rx_databuf == NULL)
2272 rc = -EIO;
2273 else {
2274 rc = IOCB_SUCCESS;
2275 /* skip over elx loopback header */
2276 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
2277 job->reply->reply_payload_rcv_len =
2278 sg_copy_from_buffer(job->reply_payload.sg_list,
2279 job->reply_payload.sg_cnt,
2280 rx_databuf, size);
2281 job->reply->reply_payload_rcv_len = size;
2282 }
2283 }
2284
2285err_loopback_test_exit:
2286 lpfcdiag_loop_self_unreg(phba, rpi);
2287
2288 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2289 lpfc_bsg_event_unref(evt); /* release ref */
2290 lpfc_bsg_event_unref(evt); /* delete */
2291 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2292
2293 if (cmdiocbq != NULL)
2294 lpfc_sli_release_iocbq(phba, cmdiocbq);
2295
2296 if (rspiocbq != NULL)
2297 lpfc_sli_release_iocbq(phba, rspiocbq);
2298
2299 if (txbmp != NULL) {
2300 if (txbpl != NULL) {
2301 if (txbuffer != NULL)
2302 diag_cmd_data_free(phba, txbuffer);
2303 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
2304 }
2305 kfree(txbmp);
2306 }
2307
2308loopback_test_exit:
2309 kfree(dataout);
2310 /* make error code available to userspace */
2311 job->reply->result = rc;
2312 job->dd_data = NULL;
2313 /* complete the job back to userspace if no error */
2314 if (rc == 0)
2315 job->job_done(job);
2316 return rc;
2317}
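
From userspace this test is reached through the FC transport bsg node with an SG_IO v4 request whose vendor command is a struct diag_mode_test (see lpfc_bsg.h below). A hedged sketch of one loopback pass; the /dev/bsg/fc_host0 node name, header availability and buffer sizes are assumptions, and the port must already have been placed in loopback with LPFC_BSG_VENDOR_DIAG_MODE:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/bsg.h>
	#include <scsi/sg.h>
	#include <scsi/scsi_bsg_fc.h>	/* fc_bsg_request, FC_BSG_HST_VENDOR */

	#define LPFC_BSG_VENDOR_DIAG_TEST	5	/* from lpfc_bsg.h */

	/* Send one loopback pass; fd is an open handle on the host's bsg node
	 * (assumed to be /dev/bsg/fc_host0). Comparing txbuf and rxbuf
	 * afterwards is up to the caller.
	 */
	static int lpfc_loopback_once(int fd, void *txbuf, void *rxbuf,
				      uint32_t len)
	{
		uint8_t rqst[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
		uint8_t rply[256];	/* comfortably above the driver minimum */
		struct fc_bsg_request *req = (struct fc_bsg_request *)rqst;
		struct sg_io_v4 io;

		memset(rqst, 0, sizeof(rqst));
		req->msgcode = FC_BSG_HST_VENDOR;
		req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_DIAG_TEST;

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request = (uintptr_t)rqst;
		io.request_len = sizeof(rqst);	/* driver enforces a minimum */
		io.dout_xferp = (uintptr_t)txbuf;	/* data to transmit */
		io.dout_xfer_len = len;
		io.din_xferp = (uintptr_t)rxbuf;	/* looped-back data */
		io.din_xfer_len = len;		/* must equal dout_xfer_len */
		io.response = (uintptr_t)rply;
		io.max_response_len = sizeof(rply);

		/* 0 on success; the driver's status is in the fc_bsg_reply */
		return ioctl(fd, SG_IO, &io);
	}
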
2318
2319/**
2320 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
2321 * @job: GET_DFC_REV fc_bsg_job
2322 **/
2323static int
2324lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
2325{
2326 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2327 struct lpfc_hba *phba = vport->phba;
2328 struct get_mgmt_rev *event_req;
2329 struct get_mgmt_rev_reply *event_reply;
2330 int rc = 0;
2331
2332 if (job->request_len <
2333 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
2334 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2335 "2740 Received GET_DFC_REV request below "
2336 "minimum size\n");
2337 rc = -EINVAL;
2338 goto job_error;
2339 }
2340
2341 event_req = (struct get_mgmt_rev *)
2342 job->request->rqst_data.h_vendor.vendor_cmd;
2343
2344 event_reply = (struct get_mgmt_rev_reply *)
2345 job->reply->reply_data.vendor_reply.vendor_rsp;
2346
2347 if (job->reply_len <
2348 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
2349 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2350 "2741 Received GET_DFC_REV reply below "
2351 "minimum size\n");
2352 rc = -EINVAL;
2353 goto job_error;
2354 }
2355
2356 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
2357 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
2358job_error:
2359 job->reply->result = rc;
2360 if (rc == 0)
2361 job->job_done(job);
2362 return rc;
2363}
2364
2365/**
2366 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
2367 * @phba: Pointer to HBA context object.
2368 * @pmboxq: Pointer to mailbox command.
2369 *
2370 * This is the completion handler function for mailbox commands issued
2371 * from the lpfc_bsg_issue_mbox function. It is called by the mailbox
2372 * event handler function with no lock held. It copies the mailbox
2373 * completion into the job's reply payload, completes the job and frees
2374 * the mailbox and tracking resources.
2375 **/
2376void
2377lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2378{
2379 struct bsg_job_data *dd_data;
2380 MAILBOX_t *pmb;
2381 MAILBOX_t *mb;
2382 struct fc_bsg_job *job;
2383 uint32_t size;
2384 unsigned long flags;
2385
2386 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2387 dd_data = pmboxq->context1;
2388 if (!dd_data) {
2389 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2390 return;
2391 }
2392
2393 pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
2394 mb = dd_data->context_un.mbox.mb;
2395 job = dd_data->context_un.mbox.set_job;
2396 memcpy(mb, pmb, sizeof(*pmb));
2397 size = job->request_payload.payload_len;
2398 job->reply->reply_payload_rcv_len =
2399 sg_copy_from_buffer(job->reply_payload.sg_list,
2400 job->reply_payload.sg_cnt,
2401 mb, size);
2402 job->reply->result = 0;
2403 dd_data->context_un.mbox.set_job = NULL;
2404 job->dd_data = NULL;
2405 job->job_done(job);
2406 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2407 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2408 kfree(mb);
2409 kfree(dd_data);
2410 return;
2411}
2412
2413/**
2414 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
2415 * @phba: Pointer to HBA context object.
2416 * @mb: Pointer to a mailbox object.
2417 * @vport: Pointer to a vport object.
2418 *
2419 * Some commands require the port to be offline; some may not be called from
2420 * the application at all.
2421 **/
2422static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2423 MAILBOX_t *mb, struct lpfc_vport *vport)
2424{
2425 /* return negative error values for bsg job */
2426 switch (mb->mbxCommand) {
2427 /* Offline only */
2428 case MBX_INIT_LINK:
2429 case MBX_DOWN_LINK:
2430 case MBX_CONFIG_LINK:
2431 case MBX_CONFIG_RING:
2432 case MBX_RESET_RING:
2433 case MBX_UNREG_LOGIN:
2434 case MBX_CLEAR_LA:
2435 case MBX_DUMP_CONTEXT:
2436 case MBX_RUN_DIAGS:
2437 case MBX_RESTART:
2438 case MBX_SET_MASK:
2439 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
2440 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2441 "2743 Command 0x%x is illegal in on-line "
2442 "state\n",
2443 mb->mbxCommand);
2444 return -EPERM;
2445 }
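		/* fall through - otherwise handled like the always-allowed commands */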
2446 case MBX_WRITE_NV:
2447 case MBX_WRITE_VPARMS:
2448 case MBX_LOAD_SM:
2449 case MBX_READ_NV:
2450 case MBX_READ_CONFIG:
2451 case MBX_READ_RCONFIG:
2452 case MBX_READ_STATUS:
2453 case MBX_READ_XRI:
2454 case MBX_READ_REV:
2455 case MBX_READ_LNK_STAT:
2456 case MBX_DUMP_MEMORY:
2457 case MBX_DOWN_LOAD:
2458 case MBX_UPDATE_CFG:
2459 case MBX_KILL_BOARD:
2460 case MBX_LOAD_AREA:
2461 case MBX_LOAD_EXP_ROM:
2462 case MBX_BEACON:
2463 case MBX_DEL_LD_ENTRY:
2464 case MBX_SET_DEBUG:
2465 case MBX_WRITE_WWN:
2466 case MBX_SLI4_CONFIG:
2467 case MBX_READ_EVENT_LOG_STATUS:
2468 case MBX_WRITE_EVENT_LOG:
2469 case MBX_PORT_CAPABILITIES:
2470 case MBX_PORT_IOV_CONTROL:
2471 break;
2472 case MBX_SET_VARIABLE:
2473 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2474 "1226 mbox: set_variable 0x%x, 0x%x\n",
2475 mb->un.varWords[0],
2476 mb->un.varWords[1]);
2477 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
2478 && (mb->un.varWords[1] == 1)) {
2479 phba->wait_4_mlo_maint_flg = 1;
2480 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
2481 phba->link_flag &= ~LS_LOOPBACK_MODE;
2482 phba->fc_topology = TOPOLOGY_PT_PT;
2483 }
2484 break;
2485 case MBX_RUN_BIU_DIAG64:
2486 case MBX_READ_EVENT_LOG:
2487 case MBX_READ_SPARM64:
2488 case MBX_READ_LA:
2489 case MBX_READ_LA64:
2490 case MBX_REG_LOGIN:
2491 case MBX_REG_LOGIN64:
2492 case MBX_CONFIG_PORT:
2493 case MBX_RUN_BIU_DIAG:
2494 default:
2495 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2496 "2742 Unknown Command 0x%x\n",
2497 mb->mbxCommand);
2498 return -EPERM;
2499 }
2500
2501 return 0; /* ok */
2502}
2503
2504/**
2505 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2506 * @phba: Pointer to HBA context object.
2507 * @mb: Pointer to a mailbox object.
2508 * @vport: Pointer to a vport object.
2509 *
2510 * Allocate a tracking object and mailbox command memory, get a mailbox
2511 * from the mailbox pool, and copy in the caller's mailbox command.
2512 *
2513 * If the port is offline or the sli layer is not active, we need to poll for
2514 * the command (the port is being reset) and complete the job; otherwise issue
2515 * the mailbox command and let our completion handler finish the command.
2516 **/
2517static uint32_t
2518lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2519 struct lpfc_vport *vport)
2520{
2521 LPFC_MBOXQ_t *pmboxq;
2522 MAILBOX_t *pmb;
2523 MAILBOX_t *mb;
2524 struct bsg_job_data *dd_data;
2525 uint32_t size;
2526 int rc = 0;
2527
2528 /* allocate our bsg tracking structure */
2529 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2530 if (!dd_data) {
2531 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2532 "2727 Failed allocation of dd_data\n");
2533 return -ENOMEM;
2534 }
2535
2536 mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
2537 if (!mb) {
2538 kfree(dd_data);
2539 return -ENOMEM;
2540 }
2541
2542 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2543 if (!pmboxq) {
2544 kfree(dd_data);
2545 kfree(mb);
2546 return -ENOMEM;
2547 }
2548
2549 size = job->request_payload.payload_len;
2550 job->reply->reply_payload_rcv_len =
2551 sg_copy_to_buffer(job->request_payload.sg_list,
2552 job->request_payload.sg_cnt,
2553 mb, size);
2554
2555 rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2556 if (rc != 0) {
2557 kfree(dd_data);
2558 kfree(mb);
2559 mempool_free(pmboxq, phba->mbox_mem_pool);
2560 return rc; /* must be negative */
2561 }
2562
2563 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2564 pmb = &pmboxq->u.mb;
2565 memcpy(pmb, mb, sizeof(*pmb));
2566 pmb->mbxOwner = OWN_HOST;
2567 pmboxq->context1 = NULL;
2568 pmboxq->vport = vport;
2569
2570 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
2571 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
2572 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2573 if (rc != MBX_SUCCESS) {
2574 if (rc != MBX_TIMEOUT) {
2575 kfree(dd_data);
2576 kfree(mb);
2577 mempool_free(pmboxq, phba->mbox_mem_pool);
2578 }
2579 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
2580 }
2581
2582 memcpy(mb, pmb, sizeof(*pmb));
2583 job->reply->reply_payload_rcv_len =
2584 sg_copy_from_buffer(job->reply_payload.sg_list,
2585 job->reply_payload.sg_cnt,
2586 mb, size);
2587 kfree(dd_data);
2588 kfree(mb);
2589 mempool_free(pmboxq, phba->mbox_mem_pool);
2590 /* not waiting; mbox already done */
2591 return 0;
2592 }
2593
2594 /* setup wake call as IOCB callback */
2595 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
2596 /* setup context field to pass wait_queue pointer to wake function */
2597 pmboxq->context1 = dd_data;
2598 dd_data->type = TYPE_MBOX;
2599 dd_data->context_un.mbox.pmboxq = pmboxq;
2600 dd_data->context_un.mbox.mb = mb;
2601 dd_data->context_un.mbox.set_job = job;
2602 job->dd_data = dd_data;
2603 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
2604 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
2605 kfree(dd_data);
2606 kfree(mb);
2607 mempool_free(pmboxq, phba->mbox_mem_pool);
2608 return -EIO;
2609 }
2610
2611 return 1;
2612}
2613
2614/**
2615 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
2616 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
2617 **/
2618static int
2619lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
2620{
2621 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2622 struct lpfc_hba *phba = vport->phba;
2623 int rc = 0;
2624
2625 /* in case no data is transferred */
2626 job->reply->reply_payload_rcv_len = 0;
2627 if (job->request_len <
2628 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
2629 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2630 "2737 Received MBOX_REQ request below "
2631 "minimum size\n");
2632 rc = -EINVAL;
2633 goto job_error;
2634 }
2635
2636 if (job->request_payload.payload_len != PAGE_SIZE) {
2637 rc = -EINVAL;
2638 goto job_error;
2639 }
2640
2641 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2642 rc = -EAGAIN;
2643 goto job_error;
2644 }
2645
2646 rc = lpfc_bsg_issue_mbox(phba, job, vport);
2647
2648job_error:
2649 if (rc == 0) {
2650 /* job done */
2651 job->reply->result = 0;
2652 job->dd_data = NULL;
2653 job->job_done(job);
2654 } else if (rc == 1)
2655 /* job submitted, will complete later*/
2656 rc = 0; /* return zero, no error */
2657 else {
2658 /* some error occurred */
2659 job->reply->result = rc;
2660 job->dd_data = NULL;
2661 }
2662
2663 return rc;
2664}
2665
2666/**
2667 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
2668 * @phba: Pointer to HBA context object.
2669 * @cmdiocbq: Pointer to command iocb.
2670 * @rspiocbq: Pointer to response iocb.
2671 *
2672 * This function is the completion handler for iocbs issued using
2673 * lpfc_menlo_cmd function. This function is called by the
2674 * ring event handler function without any lock held. This function
2675 * can be called from both worker thread context and interrupt
2676 * context. This function also can be called from another thread which
2677 * cleans up the SLI layer objects.
2678 * This function copies the contents of the response iocb to the
2679 * response iocb memory object provided by the caller of
2680 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
2681 * sleeps for the iocb completion.
2682 **/
2683static void
2684lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
2685 struct lpfc_iocbq *cmdiocbq,
2686 struct lpfc_iocbq *rspiocbq)
2687{
2688 struct bsg_job_data *dd_data;
2689 struct fc_bsg_job *job;
2690 IOCB_t *rsp;
2691 struct lpfc_dmabuf *bmp;
2692 struct lpfc_bsg_menlo *menlo;
2693 unsigned long flags;
2694 struct menlo_response *menlo_resp;
2695 int rc = 0;
2696
2697 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2698 dd_data = cmdiocbq->context1;
2699 if (!dd_data) {
2700 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2701 return;
2702 }
2703
2704 menlo = &dd_data->context_un.menlo;
2705 job = menlo->set_job;
2706 job->dd_data = NULL; /* so timeout handler does not reply */
2707
2708 spin_lock_irqsave(&phba->hbalock, flags);
2709 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
2710 if (cmdiocbq->context2 && rspiocbq)
2711 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
2712 &rspiocbq->iocb, sizeof(IOCB_t));
2713 spin_unlock_irqrestore(&phba->hbalock, flags);
2714
2715 bmp = menlo->bmp;
2716 rspiocbq = menlo->rspiocbq;
2717 rsp = &rspiocbq->iocb;
2718
2719 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
2720 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2721 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
2722 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2723
2724 /* Always return the xri; in the case of a menlo download it is
2725 * used to allow the data to be sent as a continuation of the
2726 * exchange.
2727 */
2728 menlo_resp = (struct menlo_response *)
2729 job->reply->reply_data.vendor_reply.vendor_rsp;
2730 menlo_resp->xri = rsp->ulpContext;
2731 if (rsp->ulpStatus) {
2732 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
2733 switch (rsp->un.ulpWord[4] & 0xff) {
2734 case IOERR_SEQUENCE_TIMEOUT:
2735 rc = -ETIMEDOUT;
2736 break;
2737 case IOERR_INVALID_RPI:
2738 rc = -EFAULT;
2739 break;
2740 default:
2741 rc = -EACCES;
2742 break;
2743 }
2744 } else
2745 rc = -EACCES;
2746 } else
2747 job->reply->reply_payload_rcv_len =
2748 rsp->un.genreq64.bdl.bdeSize;
2749
2750 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
2751 lpfc_sli_release_iocbq(phba, rspiocbq);
2752 lpfc_sli_release_iocbq(phba, cmdiocbq);
2753 kfree(bmp);
2754 kfree(dd_data);
2755 /* make error code available to userspace */
2756 job->reply->result = rc;
2757 /* complete the job back to userspace */
2758 job->job_done(job);
2759 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2760 return;
2761}
2762
2763/**
2764 * lpfc_menlo_cmd - send a command to menlo hardware
2765 * @job: fc_bsg_job to handle
2766 *
2767 * This function issues a gen request 64 CR iocb for all menlo cmd requests;
2768 * all the command completions will return the xri for the command.
2769 * For menlo data requests a gen request 64 CX iocb is used to continue the
2770 * exchange supplied in the menlo request header xri field.
2771 **/
2772static int
2773lpfc_menlo_cmd(struct fc_bsg_job *job)
2774{
2775 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2776 struct lpfc_hba *phba = vport->phba;
2777 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2778 IOCB_t *cmd, *rsp;
2779 int rc = 0;
2780 struct menlo_command *menlo_cmd;
2781 struct menlo_response *menlo_resp;
2782 struct lpfc_dmabuf *bmp = NULL;
2783 int request_nseg;
2784 int reply_nseg;
2785 struct scatterlist *sgel = NULL;
2786 int numbde;
2787 dma_addr_t busaddr;
2788 struct bsg_job_data *dd_data;
2789 struct ulp_bde64 *bpl = NULL;
2790
2791 /* in case no data is returned return just the return code */
2792 job->reply->reply_payload_rcv_len = 0;
2793
2794 if (job->request_len <
2795 sizeof(struct fc_bsg_request) +
2796 sizeof(struct menlo_command)) {
2797 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2798 "2784 Received MENLO_CMD request below "
2799 "minimum size\n");
2800 rc = -ERANGE;
2801 goto no_dd_data;
2802 }
2803
2804 if (job->reply_len <
2805 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
2806 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2807 "2785 Received MENLO_CMD reply below "
2808 "minimum size\n");
2809 rc = -ERANGE;
2810 goto no_dd_data;
2811 }
2812
2813 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
2814 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2815 "2786 Adapter does not support menlo "
2816 "commands\n");
2817 rc = -EPERM;
2818 goto no_dd_data;
2819 }
2820
2821 menlo_cmd = (struct menlo_command *)
2822 job->request->rqst_data.h_vendor.vendor_cmd;
2823
2824 menlo_resp = (struct menlo_response *)
2825 job->reply->reply_data.vendor_reply.vendor_rsp;
2826
2827 /* allocate our bsg tracking structure */
2828 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2829 if (!dd_data) {
2830 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2831 "2787 Failed allocation of dd_data\n");
2832 rc = -ENOMEM;
2833 goto no_dd_data;
2834 }
2835
2836 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2837 if (!bmp) {
2838 rc = -ENOMEM;
2839 goto free_dd;
2840 }
2841
2842 cmdiocbq = lpfc_sli_get_iocbq(phba);
2843 if (!cmdiocbq) {
2844 rc = -ENOMEM;
2845 goto free_bmp;
2846 }
2847
2848 rspiocbq = lpfc_sli_get_iocbq(phba);
2849 if (!rspiocbq) {
2850 rc = -ENOMEM;
2851 goto free_cmdiocbq;
2852 }
2853
2854 rsp = &rspiocbq->iocb;
2855
2856 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
2857 if (!bmp->virt) {
2858 rc = -ENOMEM;
2859 goto free_rspiocbq;
2860 }
2861
2862 INIT_LIST_HEAD(&bmp->list);
2863 bpl = (struct ulp_bde64 *) bmp->virt;
2864 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
2865 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2866 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
2867 busaddr = sg_dma_address(sgel);
2868 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2869 bpl->tus.f.bdeSize = sg_dma_len(sgel);
2870 bpl->tus.w = cpu_to_le32(bpl->tus.w);
2871 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
2872 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
2873 bpl++;
2874 }
2875
2876 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
2877 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2878 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
2879 busaddr = sg_dma_address(sgel);
2880 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2881 bpl->tus.f.bdeSize = sg_dma_len(sgel);
2882 bpl->tus.w = cpu_to_le32(bpl->tus.w);
2883 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
2884 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
2885 bpl++;
2886 }
2887
2888 cmd = &cmdiocbq->iocb;
2889 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
2890 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
2891 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
2892 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2893 cmd->un.genreq64.bdl.bdeSize =
2894 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
2895 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
2896 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
2897 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
2898 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
2899 cmd->ulpBdeCount = 1;
2900 cmd->ulpClass = CLASS3;
2901 cmd->ulpOwner = OWN_CHIP;
2902 cmd->ulpLe = 1; /* LE - last element in the BDE list */
2903 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2904 cmdiocbq->vport = phba->pport;
2905 /* We want the firmware to time out before we do */
2906 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
2907 cmdiocbq->context3 = bmp;
2908 cmdiocbq->context2 = rspiocbq;
2909 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
2910 cmdiocbq->context1 = dd_data;
2912 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
2913 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
2914 cmd->ulpPU = MENLO_PU; /* 3 */
2915 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
2916 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
2917 } else {
2918 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
2919 cmd->ulpPU = 1;
2920 cmd->un.ulpWord[4] = 0;
2921 cmd->ulpContext = menlo_cmd->xri;
2922 }
2923
2924 dd_data->type = TYPE_MENLO;
2925 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
2926 dd_data->context_un.menlo.rspiocbq = rspiocbq;
2927 dd_data->context_un.menlo.set_job = job;
2928 dd_data->context_un.menlo.bmp = bmp;
2929
2930 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2931 MENLO_TIMEOUT - 5);
2932 if (rc == IOCB_SUCCESS)
2933 return 0; /* done for now */
2934
2935 /* iocb failed so cleanup */
2936 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
2937 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2938 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
2939 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2940
2941 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
2942
2943free_rspiocbq:
2944 lpfc_sli_release_iocbq(phba, rspiocbq);
2945free_cmdiocbq:
2946 lpfc_sli_release_iocbq(phba, cmdiocbq);
2947free_bmp:
2948 kfree(bmp);
2949free_dd:
2950 kfree(dd_data);
2951no_dd_data:
2952 /* make error code available to userspace */
2953 job->reply->result = rc;
2954 job->dd_data = NULL;
2955 return rc;
2956}
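
The CR/CX split above gives a firmware download its two-step shape: the MENLO_CMD pass opens the exchange and reports its xri, and MENLO_DATA passes continue that exchange. A hedged application-side sketch, where issue_menlo_bsg() is a hypothetical wrapper around the same SG_IO plumbing sketched after lpfc_bsg_diag_test():

	struct menlo_command cmd;	/* structures from lpfc_bsg.h below */
	struct menlo_response resp;

	cmd.cmd = LPFC_BSG_VENDOR_MENLO_CMD;	/* GEN_REQUEST64_CR: new exchange */
	cmd.xri = 0;				/* xri is ignored for MENLO_CMD */
	issue_menlo_bsg(fd, &cmd, &resp, hdr, hdr_len);	/* hypothetical wrapper */

	cmd.cmd = LPFC_BSG_VENDOR_MENLO_DATA;	/* GEN_REQUEST64_CX: continue it */
	cmd.xri = resp.xri;			/* exchange id the driver returned */
	issue_menlo_bsg(fd, &cmd, &resp, data, data_len);
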
2957/**
834 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 2958 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
835 * @job: fc_bsg_job to handle 2959 * @job: fc_bsg_job to handle
836 */ 2960 **/
837static int 2961static int
838lpfc_bsg_hst_vendor(struct fc_bsg_job *job) 2962lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
839{ 2963{
840 int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; 2964 int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
2965 int rc;
841 2966
842 switch (command) { 2967 switch (command) {
843 case LPFC_BSG_VENDOR_SET_CT_EVENT: 2968 case LPFC_BSG_VENDOR_SET_CT_EVENT:
844 return lpfc_bsg_set_event(job); 2969 rc = lpfc_bsg_hba_set_event(job);
845 break; 2970 break;
846
847 case LPFC_BSG_VENDOR_GET_CT_EVENT: 2971 case LPFC_BSG_VENDOR_GET_CT_EVENT:
848 return lpfc_bsg_get_event(job); 2972 rc = lpfc_bsg_hba_get_event(job);
2973 break;
2974 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
2975 rc = lpfc_bsg_send_mgmt_rsp(job);
2976 break;
2977 case LPFC_BSG_VENDOR_DIAG_MODE:
2978 rc = lpfc_bsg_diag_mode(job);
2979 break;
2980 case LPFC_BSG_VENDOR_DIAG_TEST:
2981 rc = lpfc_bsg_diag_test(job);
2982 break;
2983 case LPFC_BSG_VENDOR_GET_MGMT_REV:
2984 rc = lpfc_bsg_get_dfc_rev(job);
2985 break;
2986 case LPFC_BSG_VENDOR_MBOX:
2987 rc = lpfc_bsg_mbox_cmd(job);
2988 break;
2989 case LPFC_BSG_VENDOR_MENLO_CMD:
2990 case LPFC_BSG_VENDOR_MENLO_DATA:
2991 rc = lpfc_menlo_cmd(job);
849 break; 2992 break;
850
851 default: 2993 default:
852 return -EINVAL; 2994 rc = -EINVAL;
2995 job->reply->reply_payload_rcv_len = 0;
2996 /* make error code available to userspace */
2997 job->reply->result = rc;
2998 break;
853 } 2999 }
3000
3001 return rc;
854} 3002}
855 3003
856/** 3004/**
857 * lpfc_bsg_request - handle a bsg request from the FC transport 3005 * lpfc_bsg_request - handle a bsg request from the FC transport
858 * @job: fc_bsg_job to handle 3006 * @job: fc_bsg_job to handle
859 */ 3007 **/
860int 3008int
861lpfc_bsg_request(struct fc_bsg_job *job) 3009lpfc_bsg_request(struct fc_bsg_job *job)
862{ 3010{
863 uint32_t msgcode; 3011 uint32_t msgcode;
864 int rc = -EINVAL; 3012 int rc;
865 3013
866 msgcode = job->request->msgcode; 3014 msgcode = job->request->msgcode;
867
868 switch (msgcode) { 3015 switch (msgcode) {
869 case FC_BSG_HST_VENDOR: 3016 case FC_BSG_HST_VENDOR:
870 rc = lpfc_bsg_hst_vendor(job); 3017 rc = lpfc_bsg_hst_vendor(job);
@@ -873,9 +3020,13 @@ lpfc_bsg_request(struct fc_bsg_job *job)
873 rc = lpfc_bsg_rport_els(job); 3020 rc = lpfc_bsg_rport_els(job);
874 break; 3021 break;
875 case FC_BSG_RPT_CT: 3022 case FC_BSG_RPT_CT:
876 rc = lpfc_bsg_rport_ct(job); 3023 rc = lpfc_bsg_send_mgmt_cmd(job);
877 break; 3024 break;
878 default: 3025 default:
3026 rc = -EINVAL;
3027 job->reply->reply_payload_rcv_len = 0;
3028 /* make error code available to userspace */
3029 job->reply->result = rc;
879 break; 3030 break;
880 } 3031 }
881 3032
@@ -888,17 +3039,83 @@ lpfc_bsg_request(struct fc_bsg_job *job)
888 * 3039 *
889 * This function just aborts the job's IOCB. The aborted IOCB will return to 3040 * This function just aborts the job's IOCB. The aborted IOCB will return to
890 * the waiting function which will handle passing the error back to userspace 3041 * the waiting function which will handle passing the error back to userspace
891 */ 3042 **/
892int 3043int
893lpfc_bsg_timeout(struct fc_bsg_job *job) 3044lpfc_bsg_timeout(struct fc_bsg_job *job)
894{ 3045{
895 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3046 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
896 struct lpfc_hba *phba = vport->phba; 3047 struct lpfc_hba *phba = vport->phba;
897 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data; 3048 struct lpfc_iocbq *cmdiocb;
3049 struct lpfc_bsg_event *evt;
3050 struct lpfc_bsg_iocb *iocb;
3051 struct lpfc_bsg_mbox *mbox;
3052 struct lpfc_bsg_menlo *menlo;
898 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3053 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3054 struct bsg_job_data *dd_data;
3055 unsigned long flags;
3056
3057 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3058 dd_data = (struct bsg_job_data *)job->dd_data;
3059 /* timeout and completion crossed paths if no dd_data */
3060 if (!dd_data) {
3061 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3062 return 0;
3063 }
899 3064
900 if (cmdiocb) 3065 switch (dd_data->type) {
3066 case TYPE_IOCB:
3067 iocb = &dd_data->context_un.iocb;
3068 cmdiocb = iocb->cmdiocbq;
3069 /* hint to completion handler that the job timed out */
3070 job->reply->result = -EAGAIN;
3071 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3072 /* this will call our completion handler */
3073 spin_lock_irq(&phba->hbalock);
3074 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
3075 spin_unlock_irq(&phba->hbalock);
3076 break;
3077 case TYPE_EVT:
3078 evt = dd_data->context_un.evt;
3079 /* this event has no job anymore */
3080 evt->set_job = NULL;
3081 job->dd_data = NULL;
3082 job->reply->reply_payload_rcv_len = 0;
3083 /* Return -EAGAIN which is our way of signalling the
3084 * app to retry.
3085 */
3086 job->reply->result = -EAGAIN;
3087 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3088 job->job_done(job);
3089 break;
3090 case TYPE_MBOX:
3091 mbox = &dd_data->context_un.mbox;
3092 /* this mbox has no job anymore */
3093 mbox->set_job = NULL;
3094 job->dd_data = NULL;
3095 job->reply->reply_payload_rcv_len = 0;
3096 job->reply->result = -EAGAIN;
3097 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3098 job->job_done(job);
3099 break;
3100 case TYPE_MENLO:
3101 menlo = &dd_data->context_un.menlo;
3102 cmdiocb = menlo->cmdiocbq;
3103 /* hint to completion handler that the job timed out */
3104 job->reply->result = -EAGAIN;
3105 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3106 /* this will call our completion handler */
3107 spin_lock_irq(&phba->hbalock);
901 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 3108 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
3109 spin_unlock_irq(&phba->hbalock);
3110 break;
3111 default:
3112 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3113 break;
3114 }
902 3115
3116 /* scsi transport fc fc_bsg_job_timeout expects a zero return code;
3117 * otherwise an error message will be displayed on the console,
3118 * so always return success (zero)
3119 */
903 return 0; 3120 return 0;
904} 3121}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
new file mode 100644
index 000000000000..5bc630819b9e
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -0,0 +1,110 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20/* bsg definitions
21 * No pointers to user data are allowed; all application buffers and sizes are
22 * derived through the bsg interface.
23 *
24 * These are the vendor unique structures passed in using the bsg
25 * FC_BSG_HST_VENDOR message code type.
26 */
27#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
28#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
29#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
30#define LPFC_BSG_VENDOR_DIAG_MODE 4
31#define LPFC_BSG_VENDOR_DIAG_TEST 5
32#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
33#define LPFC_BSG_VENDOR_MBOX 7
34#define LPFC_BSG_VENDOR_MENLO_CMD 8
35#define LPFC_BSG_VENDOR_MENLO_DATA 9
36
37struct set_ct_event {
38 uint32_t command;
39 uint32_t type_mask;
40 uint32_t ev_req_id;
41 uint32_t ev_reg_id;
42};
43
44struct get_ct_event {
45 uint32_t command;
46 uint32_t ev_reg_id;
47 uint32_t ev_req_id;
48};
49
50struct get_ct_event_reply {
51 uint32_t immed_data;
52 uint32_t type;
53};
54
55struct send_mgmt_resp {
56 uint32_t command;
57 uint32_t tag;
58};
59
60
61#define INTERNAL_LOOP_BACK 0x1 /* adapter short-circuits the loop internally */
62#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
63
64struct diag_mode_set {
65 uint32_t command;
66 uint32_t type;
67 uint32_t timeout;
68};
69
70struct diag_mode_test {
71 uint32_t command;
72};
73
74#define LPFC_WWNN_TYPE 0
75#define LPFC_WWPN_TYPE 1
76
77struct get_mgmt_rev {
78 uint32_t command;
79};
80
81#define MANAGEMENT_MAJOR_REV 1
82#define MANAGEMENT_MINOR_REV 0
83
84/* the MgmtRevInfo structure */
85struct MgmtRevInfo {
86 uint32_t a_Major;
87 uint32_t a_Minor;
88};
89
90struct get_mgmt_rev_reply {
91 struct MgmtRevInfo info;
92};
93
94struct dfc_mbox_req {
95 uint32_t command;
96 uint32_t inExtWLen;
97 uint32_t outExtWLen;
98 uint8_t mbOffset;
99};
100
101/* Used for menlo command or menlo data. The xri is only used for menlo data */
102struct menlo_command {
103 uint32_t cmd;
104 uint32_t xri;
105};
106
107struct menlo_response {
108 uint32_t xri; /* return the xri of the iocb exchange */
109};
110
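
As a usage illustration for these definitions, the simplest round trip is GET_MGMT_REV: the request carries only the command word and the reply comes back in the fc_bsg_reply vendor area. A hedged userspace sketch (node name and header availability are assumptions):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/bsg.h>
	#include <scsi/sg.h>
	#include <scsi/scsi_bsg_fc.h>

	#define LPFC_BSG_VENDOR_GET_MGMT_REV	6	/* from the header above */

	int main(void)
	{
		uint8_t rqst[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
		uint8_t rply[256];	/* fc_bsg_reply + get_mgmt_rev_reply */
		struct fc_bsg_request *req = (struct fc_bsg_request *)rqst;
		struct fc_bsg_reply *reply = (struct fc_bsg_reply *)rply;
		uint32_t *rev;
		struct sg_io_v4 io;
		int fd;

		fd = open("/dev/bsg/fc_host0", O_RDWR);	/* assumed node name */
		if (fd < 0)
			return 1;

		memset(rqst, 0, sizeof(rqst));
		req->msgcode = FC_BSG_HST_VENDOR;
		req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request = (uintptr_t)rqst;
		io.request_len = sizeof(rqst);
		io.response = (uintptr_t)rply;
		io.max_response_len = sizeof(rply);

		if (ioctl(fd, SG_IO, &io) < 0 || reply->result)
			return 1;

		/* a_Major, a_Minor as two uint32_t words in the vendor area */
		rev = (uint32_t *)reply->reply_data.vendor_reply.vendor_rsp;
		printf("mgmt rev %u.%u\n", rev[0], rev[1]);
		return 0;
	}
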
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 0830f37409a3..5087c4211b43 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -44,16 +44,27 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
44void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 44void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
45void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
46void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); 46void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
47void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
48 struct lpfc_nodelist *);
47void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); 49void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
48void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 50void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
49void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); 51void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
52void lpfc_supported_pages(struct lpfcMboxq *);
53void lpfc_sli4_params(struct lpfcMboxq *);
54int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
50 55
51struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 56struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
57void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
58void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
52void lpfc_cleanup_rpis(struct lpfc_vport *, int); 59void lpfc_cleanup_rpis(struct lpfc_vport *, int);
60void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
53int lpfc_linkdown(struct lpfc_hba *); 61int lpfc_linkdown(struct lpfc_hba *);
54void lpfc_linkdown_port(struct lpfc_vport *); 62void lpfc_linkdown_port(struct lpfc_vport *);
55void lpfc_port_link_failure(struct lpfc_vport *); 63void lpfc_port_link_failure(struct lpfc_vport *);
56void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 64void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
65void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
66void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
67void lpfc_retry_pport_discovery(struct lpfc_hba *);
57 68
58void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 69void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
59void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); 70void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -71,6 +82,7 @@ void lpfc_set_disctmo(struct lpfc_vport *);
71int lpfc_can_disctmo(struct lpfc_vport *); 82int lpfc_can_disctmo(struct lpfc_vport *);
72int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *); 83int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
73void lpfc_unreg_all_rpis(struct lpfc_vport *); 84void lpfc_unreg_all_rpis(struct lpfc_vport *);
85void lpfc_unreg_hba_rpis(struct lpfc_hba *);
74void lpfc_unreg_default_rpis(struct lpfc_vport *); 86void lpfc_unreg_default_rpis(struct lpfc_vport *);
75void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *); 87void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
76 88
@@ -97,7 +109,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
97 109
98void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); 110void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
99int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, 111int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
100 struct serv_parm *, uint32_t); 112 struct serv_parm *, uint32_t, int);
101int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); 113int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
102void lpfc_more_plogi(struct lpfc_vport *); 114void lpfc_more_plogi(struct lpfc_vport *);
103void lpfc_more_adisc(struct lpfc_vport *); 115void lpfc_more_adisc(struct lpfc_vport *);
@@ -144,6 +156,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
144 156
145void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 157void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
146 struct lpfc_iocbq *); 158 struct lpfc_iocbq *);
159void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
160 struct lpfc_iocbq *);
147int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); 161int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
148int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); 162int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
149void lpfc_fdmi_tmo(unsigned long); 163void lpfc_fdmi_tmo(unsigned long);
@@ -188,11 +202,12 @@ int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
188void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); 202void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
189void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); 203void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
190void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); 204void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
191void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); 205void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *);
192void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); 206void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
193void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); 207void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
194void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); 208void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
195int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t); 209int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
210void lpfc_issue_init_vpi(struct lpfc_vport *);
196 211
197void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, 212void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
198 uint32_t , LPFC_MBOXQ_t *); 213 uint32_t , LPFC_MBOXQ_t *);
@@ -202,7 +217,15 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
202void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); 217void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
203void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, 218void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
204 uint16_t); 219 uint16_t);
220void lpfc_unregister_fcf(struct lpfc_hba *);
221void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
205void lpfc_unregister_unused_fcf(struct lpfc_hba *); 222void lpfc_unregister_unused_fcf(struct lpfc_hba *);
223int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
224void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
225void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
226uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
227int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
228void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
206 229
207int lpfc_mem_alloc(struct lpfc_hba *, int align); 230int lpfc_mem_alloc(struct lpfc_hba *, int align);
208void lpfc_mem_free(struct lpfc_hba *); 231void lpfc_mem_free(struct lpfc_hba *);
@@ -212,7 +235,10 @@ void lpfc_stop_vport_timers(struct lpfc_vport *);
212void lpfc_poll_timeout(unsigned long ptr); 235void lpfc_poll_timeout(unsigned long ptr);
213void lpfc_poll_start_timer(struct lpfc_hba *); 236void lpfc_poll_start_timer(struct lpfc_hba *);
214void lpfc_poll_eratt(unsigned long); 237void lpfc_poll_eratt(unsigned long);
215void lpfc_sli_poll_fcp_ring(struct lpfc_hba *); 238int
239lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
240 struct lpfc_sli_ring *, uint32_t);
241
216struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); 242struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
217void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); 243void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
218uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); 244uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
@@ -235,7 +261,7 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
235int lpfc_sli_check_eratt(struct lpfc_hba *); 261int lpfc_sli_check_eratt(struct lpfc_hba *);
236void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 262void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
237 struct lpfc_sli_ring *, uint32_t); 263 struct lpfc_sli_ring *, uint32_t);
238int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); 264void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
239void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 265void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
240int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, 266int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
241 struct lpfc_iocbq *, uint32_t); 267 struct lpfc_iocbq *, uint32_t);
@@ -358,10 +384,13 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
358void lpfc_create_static_vport(struct lpfc_hba *); 384void lpfc_create_static_vport(struct lpfc_hba *);
359void lpfc_stop_hba_timers(struct lpfc_hba *); 385void lpfc_stop_hba_timers(struct lpfc_hba *);
360void lpfc_stop_port(struct lpfc_hba *); 386void lpfc_stop_port(struct lpfc_hba *);
387void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
388void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
361void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); 389void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
362int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 390int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
363void lpfc_start_fdiscs(struct lpfc_hba *phba); 391void lpfc_start_fdiscs(struct lpfc_hba *phba);
364 392struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
393struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
365#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 394#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
366#define HBA_EVENT_RSCN 5 395#define HBA_EVENT_RSCN 5
367#define HBA_EVENT_LINK_UP 2 396#define HBA_EVENT_LINK_UP 2
@@ -370,5 +399,5 @@ void lpfc_start_fdiscs(struct lpfc_hba *phba);
370/* functions to support SGIOv4/bsg interface */ 399/* functions to support SGIOv4/bsg interface */
371int lpfc_bsg_request(struct fc_bsg_job *); 400int lpfc_bsg_request(struct fc_bsg_job *);
372int lpfc_bsg_timeout(struct fc_bsg_job *); 401int lpfc_bsg_timeout(struct fc_bsg_job *);
373void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 402int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
374 struct lpfc_iocbq *); 403 struct lpfc_iocbq *);
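Note on the lpfc_crtn.h changes above: the lpfc_bsg_ct_unsol_event prototype goes from void to int so that lpfc_ct_unsol_event, in the lpfc_ct.c hunk below, can skip default processing when a registered bsg listener has already consumed the event. A minimal standalone sketch of that dispatch pattern, using stand-in types and handlers (the real handler lives in lpfc_bsg.c):

#include <stdio.h>

/* Hypothetical stand-ins for the driver's event and handlers. */
struct ct_event { int consumed_by_app; };

/* Returns 0 when an application listener consumed the event,
 * non-zero when default driver handling should proceed. */
static int bsg_ct_unsol_event(struct ct_event *ev)
{
	return ev->consumed_by_app ? 0 : 1;
}

static void default_ct_handling(struct ct_event *ev)
{
	(void)ev;
	printf("driver performs default CT processing\n");
}

static void ct_unsol_event(struct ct_event *ev)
{
	/* Mirrors the new call site: bail out early if consumed. */
	if (bsg_ct_unsol_event(ev) == 0)
		return;
	default_ct_handling(ev);
}

int main(void)
{
	struct ct_event ev = { .consumed_by_app = 0 };
	ct_unsol_event(&ev);   /* falls through to default handling */
	ev.consumed_by_app = 1;
	ct_unsol_event(&ev);   /* consumed; default handling skipped */
	return 0;
}

Returning 0 to mean "consumed" keeps the common path a single early return at each call site.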
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 9a1bd9534d74..463b74902ac4 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -25,12 +25,14 @@
25#include <linux/blkdev.h> 25#include <linux/blkdev.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/slab.h>
28#include <linux/utsname.h> 29#include <linux/utsname.h>
29 30
30#include <scsi/scsi.h> 31#include <scsi/scsi.h>
31#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
32#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35#include <scsi/fc/fc_fs.h>
34 36
35#include "lpfc_hw4.h" 37#include "lpfc_hw4.h"
36#include "lpfc_hw.h" 38#include "lpfc_hw.h"
@@ -87,7 +89,6 @@ void
87lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 89lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
88 struct lpfc_iocbq *piocbq) 90 struct lpfc_iocbq *piocbq)
89{ 91{
90
91 struct lpfc_dmabuf *mp = NULL; 92 struct lpfc_dmabuf *mp = NULL;
92 IOCB_t *icmd = &piocbq->iocb; 93 IOCB_t *icmd = &piocbq->iocb;
93 int i; 94 int i;
@@ -97,7 +98,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
97 struct list_head head; 98 struct list_head head;
98 struct lpfc_dmabuf *bdeBuf; 99 struct lpfc_dmabuf *bdeBuf;
99 100
100 lpfc_bsg_ct_unsol_event(phba, pring, piocbq); 101 if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
102 return;
101 103
102 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { 104 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
103 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 105 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
@@ -160,6 +162,40 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
160 } 162 }
161} 163}
162 164
165/**
166 * lpfc_sli4_ct_abort_unsol_event - Default handler for sli4 unsol abort
167 * @phba: Pointer to HBA context object.
168 * @pring: Pointer to the driver internal I/O ring.
169 * @piocbq: Pointer to the IOCBQ.
170 *
171 * This function serves as the default handler for the sli4 unsolicited
172 * abort event. It is invoked when no application interface has
173 * registered an unsolicited abort handler. It does nothing beyond
174 * releasing the dma buffer used by the unsol abort event.
175 **/
176void
177lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
178 struct lpfc_sli_ring *pring,
179 struct lpfc_iocbq *piocbq)
180{
181 IOCB_t *icmd = &piocbq->iocb;
182 struct lpfc_dmabuf *bdeBuf;
183 uint32_t size;
184
185 /* Forward abort event to any process registered to receive ct event */
186 if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
187 return;
188
189 /* If there is no BDE associated with IOCB, there is nothing to do */
190 if (icmd->ulpBdeCount == 0)
191 return;
192 bdeBuf = piocbq->context2;
193 piocbq->context2 = NULL;
194 size = icmd->un.cont64[0].tus.f.bdeSize;
195 lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
196 lpfc_in_buf_free(phba, bdeBuf);
197}
198
163static void 199static void
164lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) 200lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
165{ 201{
@@ -304,8 +340,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
304 /* Fill in rest of iocb */ 340 /* Fill in rest of iocb */
305 icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 341 icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
306 icmd->un.genreq64.w5.hcsw.Dfctl = 0; 342 icmd->un.genreq64.w5.hcsw.Dfctl = 0;
307 icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; 343 icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
308 icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; 344 icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
309 345
310 if (!tmo) { 346 if (!tmo) {
311 /* FC spec states we need 3 * ratov for CT requests */ 347 /* FC spec states we need 3 * ratov for CT requests */
@@ -363,9 +399,14 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
363 outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt); 399 outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
364 if (!outmp) 400 if (!outmp)
365 return -ENOMEM; 401 return -ENOMEM;
366 402 /*
403 * Form the CT IOCB. The total number of BDEs in this IOCB
404 * is the single command plus response count from
405 * lpfc_alloc_ct_rsp.
406 */
407 cnt += 1;
367 status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0, 408 status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
368 cnt+1, 0, retry); 409 cnt, 0, retry);
369 if (status) { 410 if (status) {
370 lpfc_free_ct_rsp(phba, outmp); 411 lpfc_free_ct_rsp(phba, outmp);
371 return -ENOMEM; 412 return -ENOMEM;
@@ -501,6 +542,9 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
501 SLI_CTNS_GFF_ID, 542 SLI_CTNS_GFF_ID,
502 0, Did) == 0) 543 0, Did) == 0)
503 vport->num_disc_nodes++; 544 vport->num_disc_nodes++;
545 else
546 lpfc_setup_disc_node
547 (vport, Did);
504 } 548 }
505 else { 549 else {
506 lpfc_debugfs_disc_trc(vport, 550 lpfc_debugfs_disc_trc(vport,
@@ -1209,7 +1253,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1209 be16_to_cpu(SLI_CTNS_RFF_ID); 1253 be16_to_cpu(SLI_CTNS_RFF_ID);
1210 CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); 1254 CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
1211 CtReq->un.rff.fbits = FC4_FEATURE_INIT; 1255 CtReq->un.rff.fbits = FC4_FEATURE_INIT;
1212 CtReq->un.rff.type_code = FC_FCP_DATA; 1256 CtReq->un.rff.type_code = FC_TYPE_FCP;
1213 cmpl = lpfc_cmpl_ct_cmd_rff_id; 1257 cmpl = lpfc_cmpl_ct_cmd_rff_id;
1214 break; 1258 break;
1215 } 1259 }
@@ -1802,12 +1846,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1802 c = (rev & 0x0000ff00) >> 8; 1846 c = (rev & 0x0000ff00) >> 8;
1803 b4 = (rev & 0x000000ff); 1847 b4 = (rev & 0x000000ff);
1804 1848
1805 if (flag) 1849 sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
1806 sprintf(fwrevision, "%d.%d%d%c%d ", b1,
1807 b2, b3, c, b4);
1808 else
1809 sprintf(fwrevision, "%d.%d%d%c%d ", b1,
1810 b2, b3, c, b4);
1811 } 1850 }
1812 return; 1851 return;
1813} 1852}
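In the lpfc_ct_cmd hunk above, the BDE total is now computed explicitly (cnt += 1) before calling lpfc_gen_req: one command BDE plus however many response BDEs lpfc_alloc_ct_rsp allocated. A small sketch of the same accounting, with a hypothetical per-buffer size:

#include <stdio.h>

#define BUF_SZ 4096 /* hypothetical per-BDE buffer size */

/* Count how many response BDEs a response of rsp_size bytes needs. */
static int rsp_bde_count(int rsp_size)
{
	return (rsp_size + BUF_SZ - 1) / BUF_SZ; /* round up */
}

int main(void)
{
	int cnt = rsp_bde_count(10000); /* e.g. 3 response BDEs */
	/* Total BDEs in the CT IOCB: the single command BDE plus
	 * the response count, as the updated comment spells out. */
	cnt += 1;
	printf("total BDEs in CT IOCB: %d\n", cnt);
	return 0;
}

Making the +1 a named step, with the comment the patch adds, spells out where the extra BDE comes from.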
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 8d0f0de76b63..a80d938fafc9 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -24,6 +24,7 @@
24#include <linux/idr.h> 24#include <linux/idr.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/kthread.h> 26#include <linux/kthread.h>
27#include <linux/slab.h>
27#include <linux/pci.h> 28#include <linux/pci.h>
28#include <linux/spinlock.h> 29#include <linux/spinlock.h>
29#include <linux/ctype.h> 30#include <linux/ctype.h>
@@ -926,7 +927,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
926 goto out; 927 goto out;
927 928
928 /* Round to page boundry */ 929 /* Round to page boundry */
929 printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n", 930 printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
930 __func__, _dump_buf_data); 931 __func__, _dump_buf_data);
931 debug->buffer = _dump_buf_data; 932 debug->buffer = _dump_buf_data;
932 if (!debug->buffer) { 933 if (!debug->buffer) {
@@ -956,8 +957,8 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
956 goto out; 957 goto out;
957 958
958 /* Round to page boundry */ 959 /* Round to page boundry */
959 printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__, 960 printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n",
960 _dump_buf_dif, file->f_dentry->d_name.name); 961 __func__, _dump_buf_dif, file->f_dentry->d_name.name);
961 debug->buffer = _dump_buf_dif; 962 debug->buffer = _dump_buf_dif;
962 if (!debug->buffer) { 963 if (!debug->buffer) {
963 kfree(debug); 964 kfree(debug);
@@ -1377,7 +1378,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
1377 debugfs_create_dir(name, phba->hba_debugfs_root); 1378 debugfs_create_dir(name, phba->hba_debugfs_root);
1378 if (!vport->vport_debugfs_root) { 1379 if (!vport->vport_debugfs_root) {
1379 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1380 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1380 "0417 Cant create debugfs"); 1381 "0417 Cant create debugfs\n");
1381 goto debug_failed; 1382 goto debug_failed;
1382 } 1383 }
1383 atomic_inc(&phba->debugfs_vport_count); 1384 atomic_inc(&phba->debugfs_vport_count);
@@ -1430,7 +1431,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
1430 vport, &lpfc_debugfs_op_nodelist); 1431 vport, &lpfc_debugfs_op_nodelist);
1431 if (!vport->debug_nodelist) { 1432 if (!vport->debug_nodelist) {
1432 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1433 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1433 "0409 Cant create debugfs nodelist"); 1434 "0409 Cant create debugfs nodelist\n");
1434 goto debug_failed; 1435 goto debug_failed;
1435 } 1436 }
1436debug_failed: 1437debug_failed:
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1142070e9484..2851d75ffc6f 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -19,7 +19,7 @@
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */ 21#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */
22#define FC_MAX_NS_RSP 65536 /* max size NameServer rsp */ 22#define FC_MAX_NS_RSP 64512 /* max size NameServer rsp */
23#define FC_MAXLOOP 126 /* max devices supported on a fc loop */ 23#define FC_MAXLOOP 126 /* max devices supported on a fc loop */
24#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */ 24#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */
25 25
@@ -105,8 +105,6 @@ struct lpfc_nodelist {
105 struct lpfc_vport *vport; 105 struct lpfc_vport *vport;
106 struct lpfc_work_evt els_retry_evt; 106 struct lpfc_work_evt els_retry_evt;
107 struct lpfc_work_evt dev_loss_evt; 107 struct lpfc_work_evt dev_loss_evt;
108 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
109 unsigned long last_q_full_time; /* jiffy of last queue full */
110 struct kref kref; 108 struct kref kref;
111 atomic_t cmd_pending; 109 atomic_t cmd_pending;
112 uint32_t cmd_qdepth; 110 uint32_t cmd_qdepth;
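FC_MAX_NS_RSP above drops from 65536 to 64512 (63 × 1024, i.e. 0xFC00). The diff gives no rationale; one plausible but unconfirmed reading is that the NameServer response cap must stay strictly below 64 KB so it survives 16-bit size accounting, where 65536 would truncate to 0. A trivial check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_ns_rsp = 64512;
	/* 64512 = 63 * 1024 = 0xFC00; unlike 65536 (0x10000), it still
	 * fits in a 16-bit field, where 65536 would truncate to 0. */
	printf("%u = 0x%X, fits in u16: %s\n", (unsigned)max_ns_rsp,
	       (unsigned)max_ns_rsp,
	       max_ns_rsp <= UINT16_MAX ? "yes" : "no");
	return 0;
}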
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 45337cd23feb..5fbdb22c1899 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -21,6 +21,7 @@
21/* See Fibre Channel protocol T11 FC-LS for details */ 21/* See Fibre Channel protocol T11 FC-LS for details */
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
26#include <scsi/scsi.h> 27#include <scsi/scsi.h>
@@ -50,9 +51,6 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
50 struct lpfc_nodelist *ndlp, uint8_t retry); 51 struct lpfc_nodelist *ndlp, uint8_t retry);
51static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 52static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
52 struct lpfc_iocbq *iocb); 53 struct lpfc_iocbq *iocb);
53static void lpfc_register_new_vport(struct lpfc_hba *phba,
54 struct lpfc_vport *vport,
55 struct lpfc_nodelist *ndlp);
56 54
57static int lpfc_max_els_tries = 3; 55static int lpfc_max_els_tries = 3;
58 56
@@ -173,13 +171,26 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
173 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 171 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
174 */ 172 */
175 if ((did == Fabric_DID) && 173 if ((did == Fabric_DID) &&
176 bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) && 174 (phba->hba_flag & HBA_FIP_SUPPORT) &&
177 ((elscmd == ELS_CMD_FLOGI) || 175 ((elscmd == ELS_CMD_FLOGI) ||
178 (elscmd == ELS_CMD_FDISC) || 176 (elscmd == ELS_CMD_FDISC) ||
179 (elscmd == ELS_CMD_LOGO))) 177 (elscmd == ELS_CMD_LOGO)))
180 elsiocb->iocb_flag |= LPFC_FIP_ELS; 178 switch (elscmd) {
179 case ELS_CMD_FLOGI:
180 elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
181 & LPFC_FIP_ELS_ID_MASK);
182 break;
183 case ELS_CMD_FDISC:
184 elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
185 & LPFC_FIP_ELS_ID_MASK);
186 break;
187 case ELS_CMD_LOGO:
188 elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
189 & LPFC_FIP_ELS_ID_MASK);
190 break;
191 }
181 else 192 else
182 elsiocb->iocb_flag &= ~LPFC_FIP_ELS; 193 elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
183 194
184 icmd = &elsiocb->iocb; 195 icmd = &elsiocb->iocb;
185 196
@@ -579,6 +590,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
579 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 590 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
580 spin_unlock_irq(shost->host_lock); 591 spin_unlock_irq(shost->host_lock);
581 } 592 }
593 /*
594 * If the VPI is unregistered, the driver needs to do INIT_VPI
595 * before re-registering
596 */
597 if (phba->sli_rev == LPFC_SLI_REV4) {
598 spin_lock_irq(shost->host_lock);
599 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
600 spin_unlock_irq(shost->host_lock);
601 }
582 } 602 }
583 603
584 if (phba->sli_rev < LPFC_SLI_REV4) { 604 if (phba->sli_rev < LPFC_SLI_REV4) {
@@ -591,10 +611,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
591 } else { 611 } else {
592 ndlp->nlp_type |= NLP_FABRIC; 612 ndlp->nlp_type |= NLP_FABRIC;
593 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 613 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
594 if (vport->vfi_state & LPFC_VFI_REGISTERED) { 614 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
615 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
595 lpfc_start_fdiscs(phba); 616 lpfc_start_fdiscs(phba);
596 lpfc_do_scr_ns_plogi(phba, vport); 617 lpfc_do_scr_ns_plogi(phba, vport);
597 } else 618 } else if (vport->fc_flag & FC_VFI_REGISTERED)
619 lpfc_issue_init_vpi(vport);
620 else
598 lpfc_issue_reg_vfi(vport); 621 lpfc_issue_reg_vfi(vport);
599 } 622 }
600 return 0; 623 return 0;
@@ -749,6 +772,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
749 struct lpfc_nodelist *ndlp = cmdiocb->context1; 772 struct lpfc_nodelist *ndlp = cmdiocb->context1;
750 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 773 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
751 struct serv_parm *sp; 774 struct serv_parm *sp;
775 uint16_t fcf_index;
752 int rc; 776 int rc;
753 777
754 /* Check to see if link went down during discovery */ 778 /* Check to see if link went down during discovery */
@@ -766,6 +790,54 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
766 vport->port_state); 790 vport->port_state);
767 791
768 if (irsp->ulpStatus) { 792 if (irsp->ulpStatus) {
793 /*
794 * In case of FIP mode, perform round robin FCF failover
795 * due to new FCF discovery
796 */
797 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
798 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
799 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
800 "2611 FLOGI failed on registered "
801 "FCF record fcf_index:%d, trying "
802 "to perform round robin failover\n",
803 phba->fcf.current_rec.fcf_indx);
804 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
805 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
806 /*
807 * Exhausted the eligible FCF record list,
808 * fail through to retry FLOGI on current
809 * FCF record.
810 */
811 lpfc_printf_log(phba, KERN_WARNING,
812 LOG_FIP | LOG_ELS,
813 "2760 FLOGI exhausted FCF "
814 "round robin failover list, "
815 "retry FLOGI on the current "
816 "registered FCF index:%d\n",
817 phba->fcf.current_rec.fcf_indx);
818 spin_lock_irq(&phba->hbalock);
819 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
820 spin_unlock_irq(&phba->hbalock);
821 } else {
822 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
823 fcf_index);
824 if (rc) {
825 lpfc_printf_log(phba, KERN_WARNING,
826 LOG_FIP | LOG_ELS,
827 "2761 FLOGI round "
828 "robin FCF failover "
829 "read FCF failed "
830 "rc:x%x, fcf_index:"
831 "%d\n", rc,
832 phba->fcf.current_rec.fcf_indx);
833 spin_lock_irq(&phba->hbalock);
834 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
835 spin_unlock_irq(&phba->hbalock);
836 } else
837 goto out;
838 }
839 }
840
769 /* Check for retry */ 841 /* Check for retry */
770 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 842 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
771 goto out; 843 goto out;
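The FLOGI-failure path above implements round-robin FCF failover: fetch the next eligible index with lpfc_sli4_fcf_rr_next_index_get, read and register that record, and retry FLOGI; when the list is exhausted (LPFC_FCOE_FCF_NEXT_NONE) it clears FCF_DISCOVERY and falls back to the current record. A simplified sketch of that selection loop, with a hypothetical bitmask standing in for the driver's eligible-FCF round-robin bitmap (maintained by lpfc_sli4_fcf_rr_index_set/clear in the lpfc_crtn.h hunk earlier):

#include <stdio.h>

#define FCF_CNT   8
#define FCF_NONE  0xFFFF /* stand-in for LPFC_FCOE_FCF_NEXT_NONE */

static unsigned eligible; /* bit i set => FCF index i eligible */

/* Return the next eligible index after 'cur', wrapping once. */
static unsigned fcf_rr_next_index(unsigned cur)
{
	for (unsigned step = 1; step <= FCF_CNT; step++) {
		unsigned idx = (cur + step) % FCF_CNT;
		if (eligible & (1u << idx))
			return idx;
	}
	return FCF_NONE; /* list exhausted */
}

int main(void)
{
	eligible = (1u << 2) | (1u << 5);
	unsigned idx = fcf_rr_next_index(2);
	while (idx != FCF_NONE) {
		printf("try FLOGI on FCF index %u\n", idx);
		eligible &= ~(1u << idx); /* pretend it failed; drop it */
		idx = fcf_rr_next_index(idx);
	}
	printf("exhausted; retry on current FCF record\n");
	return 0;
}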
@@ -784,13 +856,15 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
784 } 856 }
785 857
786 /* FLOGI failure */ 858 /* FLOGI failure */
787 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 859 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
788 "0100 FLOGI failure Data: x%x x%x " 860 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
789 "x%x\n",
790 irsp->ulpStatus, irsp->un.ulpWord[4], 861 irsp->ulpStatus, irsp->un.ulpWord[4],
791 irsp->ulpTimeout); 862 irsp->ulpTimeout);
792 goto flogifail; 863 goto flogifail;
793 } 864 }
865 spin_lock_irq(shost->host_lock);
866 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
867 spin_unlock_irq(shost->host_lock);
794 868
795 /* 869 /*
796 * The FLogI succeeded. Sync the data for the CPU before 870 * The FLogI succeeded. Sync the data for the CPU before
@@ -802,7 +876,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
802 876
803 /* FLOGI completes successfully */ 877 /* FLOGI completes successfully */
804 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 878 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
805 "0101 FLOGI completes sucessfully " 879 "0101 FLOGI completes successfully "
806 "Data: x%x x%x x%x x%x\n", 880 "Data: x%x x%x x%x x%x\n",
807 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 881 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
808 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); 882 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
@@ -817,8 +891,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
817 else 891 else
818 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 892 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
819 893
820 if (!rc) 894 if (!rc) {
895 /* Mark the FCF discovery process done */
896 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
897 "2769 FLOGI successful on FCF record: "
898 "current_fcf_index:x%x, terminate FCF "
899 "round robin failover process\n",
900 phba->fcf.current_rec.fcf_indx);
901 spin_lock_irq(&phba->hbalock);
902 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
903 spin_unlock_irq(&phba->hbalock);
821 goto out; 904 goto out;
905 }
822 } 906 }
823 907
824flogifail: 908flogifail:
@@ -956,7 +1040,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
956 * function returns, it does not guarantee all the IOCBs are actually aborted. 1040 * function returns, it does not guarantee all the IOCBs are actually aborted.
957 * 1041 *
958 * Return code 1042 * Return code
959 * 0 - Sucessfully issued abort iocb on all outstanding flogis (Always 0) 1043 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
960 **/ 1044 **/
961int 1045int
962lpfc_els_abort_flogi(struct lpfc_hba *phba) 1046lpfc_els_abort_flogi(struct lpfc_hba *phba)
@@ -1384,6 +1468,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1384 goto out; 1468 goto out;
1385 } 1469 }
1386 /* PLOGI failed */ 1470 /* PLOGI failed */
1471 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1472 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1473 ndlp->nlp_DID, irsp->ulpStatus,
1474 irsp->un.ulpWord[4]);
1387 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1475 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1388 if (lpfc_error_lost_link(irsp)) 1476 if (lpfc_error_lost_link(irsp))
1389 rc = NLP_STE_FREED_NODE; 1477 rc = NLP_STE_FREED_NODE;
@@ -1552,6 +1640,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1552 goto out; 1640 goto out;
1553 } 1641 }
1554 /* PRLI failed */ 1642 /* PRLI failed */
1643 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1644 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
1645 ndlp->nlp_DID, irsp->ulpStatus,
1646 irsp->un.ulpWord[4]);
1555 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1647 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1556 if (lpfc_error_lost_link(irsp)) 1648 if (lpfc_error_lost_link(irsp))
1557 goto out; 1649 goto out;
@@ -1835,6 +1927,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1835 goto out; 1927 goto out;
1836 } 1928 }
1837 /* ADISC failed */ 1929 /* ADISC failed */
1930 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1931 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
1932 ndlp->nlp_DID, irsp->ulpStatus,
1933 irsp->un.ulpWord[4]);
1838 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1934 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1839 if (!lpfc_error_lost_link(irsp)) 1935 if (!lpfc_error_lost_link(irsp))
1840 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1936 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -1984,6 +2080,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1984 /* ELS command is being retried */ 2080 /* ELS command is being retried */
1985 goto out; 2081 goto out;
1986 /* LOGO failed */ 2082 /* LOGO failed */
2083 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2084 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2085 ndlp->nlp_DID, irsp->ulpStatus,
2086 irsp->un.ulpWord[4]);
1987 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2087 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1988 if (lpfc_error_lost_link(irsp)) 2088 if (lpfc_error_lost_link(irsp))
1989 goto out; 2089 goto out;
@@ -2452,6 +2552,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2452 */ 2552 */
2453 del_timer_sync(&ndlp->nlp_delayfunc); 2553 del_timer_sync(&ndlp->nlp_delayfunc);
2454 retry = ndlp->nlp_retry; 2554 retry = ndlp->nlp_retry;
2555 ndlp->nlp_retry = 0;
2455 2556
2456 switch (cmd) { 2557 switch (cmd) {
2457 case ELS_CMD_FLOGI: 2558 case ELS_CMD_FLOGI:
@@ -2706,17 +2807,21 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2706 if (did == FDMI_DID) 2807 if (did == FDMI_DID)
2707 retry = 1; 2808 retry = 1;
2708 2809
2709 if ((cmd == ELS_CMD_FLOGI) && 2810 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
2710 (phba->fc_topology != TOPOLOGY_LOOP) && 2811 (phba->fc_topology != TOPOLOGY_LOOP) &&
2711 !lpfc_error_lost_link(irsp)) { 2812 !lpfc_error_lost_link(irsp)) {
2712 /* FLOGI retry policy */ 2813 /* FLOGI retry policy */
2713 retry = 1; 2814 retry = 1;
2714 maxretry = 48; 2815 /* retry forever */
2715 if (cmdiocb->retry >= 32) 2816 maxretry = 0;
2817 if (cmdiocb->retry >= 100)
2818 delay = 5000;
2819 else if (cmdiocb->retry >= 32)
2716 delay = 1000; 2820 delay = 1000;
2717 } 2821 }
2718 2822
2719 if ((++cmdiocb->retry) >= maxretry) { 2823 cmdiocb->retry++;
2824 if (maxretry && (cmdiocb->retry >= maxretry)) {
2720 phba->fc_stat.elsRetryExceeded++; 2825 phba->fc_stat.elsRetryExceeded++;
2721 retry = 0; 2826 retry = 0;
2722 } 2827 }
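The retry hunk above changes the FLOGI/FDISC policy on non-loop topologies from a hard cap of 48 attempts to retrying indefinitely (maxretry = 0), with the delay stepping up as attempts accumulate. A tiny sketch of the delay schedule exactly as the hunk encodes it:

#include <stdio.h>

/* Delay (ms) before the next FLOGI retry, per the updated policy:
 * no delay early on, 1s after 32 attempts, 5s after 100. */
static int flogi_retry_delay_ms(int retry)
{
	if (retry >= 100)
		return 5000;
	if (retry >= 32)
		return 1000;
	return 0;
}

int main(void)
{
	int samples[] = { 1, 31, 32, 99, 100, 500 };
	for (int i = 0; i < 6; i++)
		printf("retry %3d -> delay %d ms\n",
		       samples[i], flogi_retry_delay_ms(samples[i]));
	return 0;
}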
@@ -3099,7 +3204,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3099 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 3204 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3100 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { 3205 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
3101 /* A LS_RJT associated with Default RPI cleanup has its own 3206 /* A LS_RJT associated with Default RPI cleanup has its own
3102 * seperate code path. 3207 * separate code path.
3103 */ 3208 */
3104 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI)) 3209 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3105 ls_rjt = 1; 3210 ls_rjt = 1;
@@ -4124,8 +4229,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4124 spin_lock_irq(shost->host_lock); 4229 spin_lock_irq(shost->host_lock);
4125 if (vport->fc_rscn_flush) { 4230 if (vport->fc_rscn_flush) {
4126 /* Another thread is walking fc_rscn_id_list on this vport */ 4231 /* Another thread is walking fc_rscn_id_list on this vport */
4127 spin_unlock_irq(shost->host_lock);
4128 vport->fc_flag |= FC_RSCN_DISCOVERY; 4232 vport->fc_flag |= FC_RSCN_DISCOVERY;
4233 spin_unlock_irq(shost->host_lock);
4129 /* Send back ACC */ 4234 /* Send back ACC */
4130 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4235 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4131 return 0; 4236 return 0;
@@ -4133,7 +4238,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4133 /* Indicate we are walking fc_rscn_id_list on this vport */ 4238 /* Indicate we are walking fc_rscn_id_list on this vport */
4134 vport->fc_rscn_flush = 1; 4239 vport->fc_rscn_flush = 1;
4135 spin_unlock_irq(shost->host_lock); 4240 spin_unlock_irq(shost->host_lock);
4136 /* Get the array count after sucessfully have the token */ 4241 /* Get the array count after successfully have the token */
4137 rscn_cnt = vport->fc_rscn_id_cnt; 4242 rscn_cnt = vport->fc_rscn_id_cnt;
4138 /* If we are already processing an RSCN, save the received 4243 /* If we are already processing an RSCN, save the received
4139 * RSCN payload buffer, cmdiocb->context2 to process later. 4244 * RSCN payload buffer, cmdiocb->context2 to process later.
@@ -4367,7 +4472,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4367 4472
4368 did = Fabric_DID; 4473 did = Fabric_DID;
4369 4474
4370 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) { 4475 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
4371 /* For a FLOGI we accept, then if our portname is greater 4476 /* For a FLOGI we accept, then if our portname is greater
4372 * then the remote portname we initiate Nport login. 4477 * then the remote portname we initiate Nport login.
4373 */ 4478 */
@@ -4503,6 +4608,29 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4503} 4608}
4504 4609
4505/** 4610/**
4611 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
4612 * @vport: pointer to a host virtual N_Port data structure.
4613 * @cmdiocb: pointer to lpfc command iocb data structure.
4614 * @ndlp: pointer to a node-list data structure.
4615 *
4616 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
4617 * received as an ELS unsolicited event. A request to RRQ shall only
4618 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
4619 * Nx_Port N_Port_ID of the target Exchange is the same as the
4620 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
4621 * not accepted, an LS_RJT with reason code "Unable to perform
4622 * command request" and reason code explanation "Invalid Originator
4623 * S_ID" shall be returned. For now, we just unconditionally accept
4624 * RRQ from the target.
4625 **/
4626static void
4627lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4628 struct lpfc_nodelist *ndlp)
4629{
4630 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4631}
4632
4633/**
4506 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 4634 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
4507 * @phba: pointer to lpfc hba data structure. 4635 * @phba: pointer to lpfc hba data structure.
4508 * @pmb: pointer to the driver internal queue element for mailbox command. 4636 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -5396,7 +5524,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5396 if (lpfc_els_chk_latt(vport)) 5524 if (lpfc_els_chk_latt(vport))
5397 goto dropit; 5525 goto dropit;
5398 5526
5399 /* Ignore traffic recevied during vport shutdown. */ 5527 /* Ignore traffic received during vport shutdown. */
5400 if (vport->load_flag & FC_UNLOADING) 5528 if (vport->load_flag & FC_UNLOADING)
5401 goto dropit; 5529 goto dropit;
5402 5530
@@ -5618,6 +5746,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5618 if (newnode) 5746 if (newnode)
5619 lpfc_nlp_put(ndlp); 5747 lpfc_nlp_put(ndlp);
5620 break; 5748 break;
5749 case ELS_CMD_RRQ:
5750 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5751 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
5752 did, vport->port_state, ndlp->nlp_flag);
5753
5754 phba->fc_stat.elsRcvRRQ++;
5755 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
5756 if (newnode)
5757 lpfc_nlp_put(ndlp);
5758 break;
5621 default: 5759 default:
5622 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 5760 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5623 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 5761 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -5670,7 +5808,7 @@ dropit:
5670 * NULL - No vport with the matching @vpi found 5808 * NULL - No vport with the matching @vpi found
5671 * Otherwise - Address to the vport with the matching @vpi. 5809 * Otherwise - Address to the vport with the matching @vpi.
5672 **/ 5810 **/
5673static struct lpfc_vport * 5811struct lpfc_vport *
5674lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) 5812lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5675{ 5813{
5676 struct lpfc_vport *vport; 5814 struct lpfc_vport *vport;
@@ -5864,6 +6002,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5864 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6002 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5865 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 6003 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5866 MAILBOX_t *mb = &pmb->u.mb; 6004 MAILBOX_t *mb = &pmb->u.mb;
6005 int rc;
5867 6006
5868 spin_lock_irq(shost->host_lock); 6007 spin_lock_irq(shost->host_lock);
5869 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 6008 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5885,6 +6024,26 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5885 spin_unlock_irq(shost->host_lock); 6024 spin_unlock_irq(shost->host_lock);
5886 lpfc_can_disctmo(vport); 6025 lpfc_can_disctmo(vport);
5887 break; 6026 break;
6027 /* If reg_vpi fails with invalid VPI status, re-init VPI */
6028 case 0x20:
6029 spin_lock_irq(shost->host_lock);
6030 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6031 spin_unlock_irq(shost->host_lock);
6032 lpfc_init_vpi(phba, pmb, vport->vpi);
6033 pmb->vport = vport;
6034 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
6035 rc = lpfc_sli_issue_mbox(phba, pmb,
6036 MBX_NOWAIT);
6037 if (rc == MBX_NOT_FINISHED) {
6038 lpfc_printf_vlog(vport,
6039 KERN_ERR, LOG_MBOX,
6040 "2732 Failed to issue INIT_VPI"
6041 " mailbox command\n");
6042 } else {
6043 lpfc_nlp_put(ndlp);
6044 return;
6045 }
6046
5888 default: 6047 default:
5889 /* Try to recover from this error */ 6048 /* Try to recover from this error */
5890 lpfc_mbx_unreg_vpi(vport); 6049 lpfc_mbx_unreg_vpi(vport);
@@ -5897,14 +6056,23 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5897 lpfc_initial_fdisc(vport); 6056 lpfc_initial_fdisc(vport);
5898 break; 6057 break;
5899 } 6058 }
5900
5901 } else { 6059 } else {
5902 if (vport == phba->pport) 6060 spin_lock_irq(shost->host_lock);
6061 vport->vpi_state |= LPFC_VPI_REGISTERED;
6062 spin_unlock_irq(shost->host_lock);
6063 if (vport == phba->pport) {
5903 if (phba->sli_rev < LPFC_SLI_REV4) 6064 if (phba->sli_rev < LPFC_SLI_REV4)
5904 lpfc_issue_fabric_reglogin(vport); 6065 lpfc_issue_fabric_reglogin(vport);
5905 else 6066 else {
5906 lpfc_issue_reg_vfi(vport); 6067 /*
5907 else 6068 * If the physical port is instantiated using
6069 * FDISC, do not start vport discovery.
6070 */
6071 if (vport->port_state != LPFC_FDISC)
6072 lpfc_start_fdiscs(phba);
6073 lpfc_do_scr_ns_plogi(phba, vport);
6074 }
6075 } else
5908 lpfc_do_scr_ns_plogi(phba, vport); 6076 lpfc_do_scr_ns_plogi(phba, vport);
5909 } 6077 }
5910 6078
@@ -5926,7 +6094,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5926 * This routine registers the @vport as a new virtual port with a HBA. 6094 * This routine registers the @vport as a new virtual port with a HBA.
5927 * It is done through a registering vpi mailbox command. 6095 * It is done through a registering vpi mailbox command.
5928 **/ 6096 **/
5929static void 6097void
5930lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 6098lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
5931 struct lpfc_nodelist *ndlp) 6099 struct lpfc_nodelist *ndlp)
5932{ 6100{
@@ -5967,6 +6135,92 @@ mbox_err_exit:
5967} 6135}
5968 6136
5969/** 6137/**
6138 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
6139 * @phba: pointer to lpfc hba data structure.
6140 *
6141 * This routine cancels the retry delay timers for all the vports.
6142 **/
6143void
6144lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
6145{
6146 struct lpfc_vport **vports;
6147 struct lpfc_nodelist *ndlp;
6148 uint32_t link_state;
6149 int i;
6150
6151 /* Treat this failure as linkdown for all vports */
6152 link_state = phba->link_state;
6153 lpfc_linkdown(phba);
6154 phba->link_state = link_state;
6155
6156 vports = lpfc_create_vport_work_array(phba);
6157
6158 if (vports) {
6159 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6160 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6161 if (ndlp)
6162 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6163 lpfc_els_flush_cmd(vports[i]);
6164 }
6165 lpfc_destroy_vport_work_array(phba, vports);
6166 }
6167}
6168
6169/**
6170 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
6171 * @phba: pointer to lpfc hba data structure.
6172 *
6173 * This routine aborts all pending discovery commands and
6174 * starts a timer to retry FLOGI for the physical port
6175 * discovery.
6176 **/
6177void
6178lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6179{
6180 struct lpfc_nodelist *ndlp;
6181 struct Scsi_Host *shost;
6182
6183 /* Cancel all the vports' retry delay timers */
6184 lpfc_cancel_all_vport_retry_delay_timer(phba);
6185
6186 /* If the fabric requires FLOGI, then re-instantiate physical login */
6187 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6188 if (!ndlp)
6189 return;
6190
6191 shost = lpfc_shost_from_vport(phba->pport);
6192 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
6193 spin_lock_irq(shost->host_lock);
6194 ndlp->nlp_flag |= NLP_DELAY_TMO;
6195 spin_unlock_irq(shost->host_lock);
6196 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
6197 phba->pport->port_state = LPFC_FLOGI;
6198 return;
6199}
6200
6201/**
6202 * lpfc_fabric_login_reqd - Check if FLOGI required.
6203 * @phba: pointer to lpfc hba data structure.
6204 * @cmdiocb: pointer to FDISC command iocb.
6205 * @rspiocb: pointer to FDISC response iocb.
6206 *
6207 * This routine checks if a FLOGI is required for FDISC
6208 * to succeed.
6209 **/
6210static int
6211lpfc_fabric_login_reqd(struct lpfc_hba *phba,
6212 struct lpfc_iocbq *cmdiocb,
6213 struct lpfc_iocbq *rspiocb)
6214{
6215
6216 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
6217 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
6218 return 0;
6219 else
6220 return 1;
6221}
6222
6223/**
5970 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 6224 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
5971 * @phba: pointer to lpfc hba data structure. 6225 * @phba: pointer to lpfc hba data structure.
5972 * @cmdiocb: pointer to lpfc command iocb data structure. 6226 * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -6015,6 +6269,12 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6015 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 6269 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
6016 6270
6017 if (irsp->ulpStatus) { 6271 if (irsp->ulpStatus) {
6272
6273 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
6274 lpfc_retry_pport_discovery(phba);
6275 goto out;
6276 }
6277
6018 /* Check for retry */ 6278 /* Check for retry */
6019 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 6279 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
6020 goto out; 6280 goto out;
@@ -6024,12 +6284,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6024 irsp->ulpStatus, irsp->un.ulpWord[4]); 6284 irsp->ulpStatus, irsp->un.ulpWord[4]);
6025 goto fdisc_failed; 6285 goto fdisc_failed;
6026 } 6286 }
6027 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
6028 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6029 lpfc_nlp_put(ndlp);
6030 /* giving up on FDISC. Cancel discovery timer */
6031 lpfc_can_disctmo(vport);
6032 spin_lock_irq(shost->host_lock); 6287 spin_lock_irq(shost->host_lock);
6288 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
6033 vport->fc_flag |= FC_FABRIC; 6289 vport->fc_flag |= FC_FABRIC;
6034 if (vport->phba->fc_topology == TOPOLOGY_LOOP) 6290 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
6035 vport->fc_flag |= FC_PUBLIC_LOOP; 6291 vport->fc_flag |= FC_PUBLIC_LOOP;
@@ -6057,10 +6313,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6057 lpfc_mbx_unreg_vpi(vport); 6313 lpfc_mbx_unreg_vpi(vport);
6058 spin_lock_irq(shost->host_lock); 6314 spin_lock_irq(shost->host_lock);
6059 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6315 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6316 if (phba->sli_rev == LPFC_SLI_REV4)
6317 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6060 spin_unlock_irq(shost->host_lock); 6318 spin_unlock_irq(shost->host_lock);
6061 } 6319 }
6062 6320
6063 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 6321 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
6322 lpfc_issue_init_vpi(vport);
6323 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
6064 lpfc_register_new_vport(phba, vport, ndlp); 6324 lpfc_register_new_vport(phba, vport, ndlp);
6065 else 6325 else
6066 lpfc_do_scr_ns_plogi(phba, vport); 6326 lpfc_do_scr_ns_plogi(phba, vport);
@@ -6107,6 +6367,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6107 int did = ndlp->nlp_DID; 6367 int did = ndlp->nlp_DID;
6108 int rc; 6368 int rc;
6109 6369
6370 vport->port_state = LPFC_FDISC;
6110 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 6371 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
6111 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 6372 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
6112 ELS_CMD_FDISC); 6373 ELS_CMD_FDISC);
@@ -6172,7 +6433,6 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6172 return 1; 6433 return 1;
6173 } 6434 }
6174 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 6435 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
6175 vport->port_state = LPFC_FDISC;
6176 return 0; 6436 return 0;
6177} 6437}
6178 6438
@@ -6632,21 +6892,27 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6632 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6892 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6633 unsigned long iflag = 0; 6893 unsigned long iflag = 0;
6634 6894
6635 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); 6895 spin_lock_irqsave(&phba->hbalock, iflag);
6896 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
6636 list_for_each_entry_safe(sglq_entry, sglq_next, 6897 list_for_each_entry_safe(sglq_entry, sglq_next,
6637 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 6898 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6638 if (sglq_entry->sli4_xritag == xri) { 6899 if (sglq_entry->sli4_xritag == xri) {
6639 list_del(&sglq_entry->list); 6900 list_del(&sglq_entry->list);
6640 spin_unlock_irqrestore(
6641 &phba->sli4_hba.abts_sgl_list_lock,
6642 iflag);
6643 spin_lock_irqsave(&phba->hbalock, iflag);
6644
6645 list_add_tail(&sglq_entry->list, 6901 list_add_tail(&sglq_entry->list,
6646 &phba->sli4_hba.lpfc_sgl_list); 6902 &phba->sli4_hba.lpfc_sgl_list);
6903 sglq_entry->state = SGL_FREED;
6904 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6647 spin_unlock_irqrestore(&phba->hbalock, iflag); 6905 spin_unlock_irqrestore(&phba->hbalock, iflag);
6648 return; 6906 return;
6649 } 6907 }
6650 } 6908 }
6651 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag); 6909 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6910 sglq_entry = __lpfc_get_active_sglq(phba, xri);
6911 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
6912 spin_unlock_irqrestore(&phba->hbalock, iflag);
6913 return;
6914 }
6915 sglq_entry->state = SGL_XRI_ABORTED;
6916 spin_unlock_irqrestore(&phba->hbalock, iflag);
6917 return;
6652} 6918}
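The lpfc_sli4_els_xri_aborted rework above fixes the lock ordering (hbalock is taken first and abts_sgl_list_lock nested inside it, instead of dropping one lock to take the other mid-walk) and records an explicit sglq state: SGL_FREED when the entry moves back to the free list, SGL_XRI_ABORTED when the XRI belongs to a still-active sglq found via __lpfc_get_active_sglq. A sketch of the nesting pattern, with pthread mutexes standing in for the driver's spinlocks:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for hbalock and abts_sgl_list_lock. */
static pthread_mutex_t hba_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

enum sgl_state { SGL_ALLOCATED, SGL_XRI_ABORTED, SGL_FREED };
static enum sgl_state state = SGL_ALLOCATED;

static void xri_aborted(int on_abort_list)
{
	/* Outer lock first, inner lock second -- one consistent order,
	 * instead of dropping one lock to take the other mid-walk. */
	pthread_mutex_lock(&hba_lock);
	pthread_mutex_lock(&list_lock);
	if (on_abort_list)
		state = SGL_FREED;        /* moved back to free list */
	pthread_mutex_unlock(&list_lock);
	if (!on_abort_list)
		state = SGL_XRI_ABORTED;  /* active sglq marked aborted */
	pthread_mutex_unlock(&hba_lock);
}

int main(void)
{
	xri_aborted(1);
	printf("state after abort-list hit: %d (SGL_FREED)\n", state);
	xri_aborted(0);
	printf("state after active hit: %d (SGL_XRI_ABORTED)\n", state);
	return 0;
}

A single, consistent acquisition order closes the window the old unlock/relock sequence left open.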
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e6a47e25b218..e1466eec56b7 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -20,6 +20,7 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/slab.h>
23#include <linux/pci.h> 24#include <linux/pci.h>
24#include <linux/kthread.h> 25#include <linux/kthread.h>
25#include <linux/interrupt.h> 26#include <linux/interrupt.h>
@@ -525,8 +526,8 @@ lpfc_work_done(struct lpfc_hba *phba)
525 spin_unlock_irq(&phba->hbalock); 526 spin_unlock_irq(&phba->hbalock);
526 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 527 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
527 } 528 }
528 if (phba->hba_flag & HBA_RECEIVE_BUFFER) 529 if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
529 lpfc_sli4_handle_received_buffer(phba); 530 lpfc_sli4_fcf_redisc_event_proc(phba);
530 } 531 }
531 532
532 vports = lpfc_create_vport_work_array(phba); 533 vports = lpfc_create_vport_work_array(phba);
@@ -568,8 +569,9 @@ lpfc_work_done(struct lpfc_hba *phba)
568 pring = &phba->sli.ring[LPFC_ELS_RING]; 569 pring = &phba->sli.ring[LPFC_ELS_RING];
569 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 570 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
570 status >>= (4*LPFC_ELS_RING); 571 status >>= (4*LPFC_ELS_RING);
571 if ((status & HA_RXMASK) 572 if ((status & HA_RXMASK) ||
572 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { 573 (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
574 (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
573 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 575 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
574 pring->flag |= LPFC_DEFERRED_RING_EVENT; 576 pring->flag |= LPFC_DEFERRED_RING_EVENT;
575 /* Set the lpfc data pending flag */ 577 /* Set the lpfc data pending flag */
@@ -688,7 +690,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
688 lpfc_unreg_rpi(vport, ndlp); 690 lpfc_unreg_rpi(vport, ndlp);
689 691
690 /* Leave Fabric nodes alone on link down */ 692 /* Leave Fabric nodes alone on link down */
691 if (!remove && ndlp->nlp_type & NLP_FABRIC) 693 if ((phba->sli_rev < LPFC_SLI_REV4) &&
694 (!remove && ndlp->nlp_type & NLP_FABRIC))
692 continue; 695 continue;
693 rc = lpfc_disc_state_machine(vport, ndlp, NULL, 696 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
694 remove 697 remove
@@ -706,6 +709,11 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
706void 709void
707lpfc_port_link_failure(struct lpfc_vport *vport) 710lpfc_port_link_failure(struct lpfc_vport *vport)
708{ 711{
712 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
713
714 /* Cleanup any outstanding received buffers */
715 lpfc_cleanup_rcv_buffers(vport);
716
709 /* Cleanup any outstanding RSCN activity */ 717 /* Cleanup any outstanding RSCN activity */
710 lpfc_els_flush_rscn(vport); 718 lpfc_els_flush_rscn(vport);
711 719
@@ -744,13 +752,19 @@ lpfc_linkdown(struct lpfc_hba *phba)
744 752
745 if (phba->link_state == LPFC_LINK_DOWN) 753 if (phba->link_state == LPFC_LINK_DOWN)
746 return 0; 754 return 0;
755
756 /* Block all SCSI stack I/Os */
757 lpfc_scsi_dev_block(phba);
758
747 spin_lock_irq(&phba->hbalock); 759 spin_lock_irq(&phba->hbalock);
748 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); 760 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
761 spin_unlock_irq(&phba->hbalock);
749 if (phba->link_state > LPFC_LINK_DOWN) { 762 if (phba->link_state > LPFC_LINK_DOWN) {
750 phba->link_state = LPFC_LINK_DOWN; 763 phba->link_state = LPFC_LINK_DOWN;
764 spin_lock_irq(shost->host_lock);
751 phba->pport->fc_flag &= ~FC_LBIT; 765 phba->pport->fc_flag &= ~FC_LBIT;
766 spin_unlock_irq(shost->host_lock);
752 } 767 }
753 spin_unlock_irq(&phba->hbalock);
754 vports = lpfc_create_vport_work_array(phba); 768 vports = lpfc_create_vport_work_array(phba);
755 if (vports != NULL) 769 if (vports != NULL)
756 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 770 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@@ -1015,13 +1029,12 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1015 mempool_free(mboxq, phba->mbox_mem_pool); 1029 mempool_free(mboxq, phba->mbox_mem_pool);
1016 return; 1030 return;
1017 } 1031 }
1018 if (vport->port_state != LPFC_FLOGI) { 1032 spin_lock_irqsave(&phba->hbalock, flags);
1019 spin_lock_irqsave(&phba->hbalock, flags); 1033 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1020 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1034 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1021 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1035 spin_unlock_irqrestore(&phba->hbalock, flags);
1022 spin_unlock_irqrestore(&phba->hbalock, flags); 1036 if (vport->port_state != LPFC_FLOGI)
1023 lpfc_initial_flogi(vport); 1037 lpfc_initial_flogi(vport);
1024 }
1025 1038
1026 mempool_free(mboxq, phba->mbox_mem_pool); 1039 mempool_free(mboxq, phba->mbox_mem_pool);
1027 return; 1040 return;
@@ -1039,25 +1052,23 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1039static uint32_t 1052static uint32_t
1040lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) 1053lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1041{ 1054{
1042 if ((fab_name[0] == 1055 if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
1043 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) && 1056 return 0;
1044 (fab_name[1] == 1057 if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
1045 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) && 1058 return 0;
1046 (fab_name[2] == 1059 if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
1047 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) && 1060 return 0;
1048 (fab_name[3] == 1061 if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
1049 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) && 1062 return 0;
1050 (fab_name[4] == 1063 if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
1051 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) && 1064 return 0;
1052 (fab_name[5] == 1065 if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
1053 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) && 1066 return 0;
1054 (fab_name[6] == 1067 if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
1055 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) && 1068 return 0;
1056 (fab_name[7] == 1069 if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
1057 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
1058 return 1;
1059 else
1060 return 0; 1070 return 0;
1071 return 1;
1061} 1072}
1062 1073
1063/** 1074/**
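The match-helper rewrites starting above (lpfc_fab_name_match here, the switch-name and MAC helpers in the hunks that follow) replace one long &&-chain with a sequence of early returns. The kernel code stays unrolled because each byte comes through a bf_get() accessor; stripped of that detail, the shape is just a field-by-field compare with a first-mismatch exit:

#include <stdint.h>
#include <stdio.h>

/* Compare two 8-byte names field by field with early returns,
 * mirroring the refactored lpfc_fab_name_match/lpfc_sw_name_match. */
static int name_match(const uint8_t a[8], const uint8_t b[8])
{
	for (int i = 0; i < 8; i++)
		if (a[i] != b[i])
			return 0; /* first mismatch ends the check */
	return 1;
}

int main(void)
{
	uint8_t x[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t y[8] = { 1, 2, 3, 4, 5, 6, 7, 9 };
	printf("x vs x: %d\n", name_match(x, x)); /* 1 */
	printf("x vs y: %d\n", name_match(x, y)); /* 0 */
	return 0;
}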
@@ -1072,30 +1083,28 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1072static uint32_t 1083static uint32_t
1073lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) 1084lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1074{ 1085{
1075 if ((sw_name[0] == 1086 if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
1076 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) && 1087 return 0;
1077 (sw_name[1] == 1088 if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
1078 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) && 1089 return 0;
1079 (sw_name[2] == 1090 if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
1080 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) && 1091 return 0;
1081 (sw_name[3] == 1092 if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
1082 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) && 1093 return 0;
1083 (sw_name[4] == 1094 if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
1084 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
1085 (sw_name[5] ==
1086 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
1087 (sw_name[6] ==
1088 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
1089 (sw_name[7] ==
1090 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
1091 return 1;
1092 else
1093 return 0; 1095 return 0;
1096 if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
1097 return 0;
1098 if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
1099 return 0;
1100 if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
1101 return 0;
1102 return 1;
1094} 1103}
1095 1104
1096/** 1105/**
1097 * lpfc_mac_addr_match - Check if the fcf mac address match. 1106 * lpfc_mac_addr_match - Check if the fcf mac address match.
1098 * @phba: pointer to lpfc hba data structure. 1107 * @mac_addr: pointer to mac address.
1099 * @new_fcf_record: pointer to fcf record. 1108 * @new_fcf_record: pointer to fcf record.
1100 * 1109 *
 1101 * This routine compares the fcf record's mac address with the HBA's 1110 * This routine compares the fcf record's mac address with the HBA's
@@ -1103,85 +1112,115 @@ lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1103 * returns 1 else return 0. 1112 * returns 1 else return 0.
1104 **/ 1113 **/
1105static uint32_t 1114static uint32_t
1106lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) 1115lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
1107{ 1116{
1108 if ((phba->fcf.mac_addr[0] == 1117 if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
1109 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) && 1118 return 0;
1110 (phba->fcf.mac_addr[1] == 1119 if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
1111 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
1112 (phba->fcf.mac_addr[2] ==
1113 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
1114 (phba->fcf.mac_addr[3] ==
1115 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
1116 (phba->fcf.mac_addr[4] ==
1117 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
1118 (phba->fcf.mac_addr[5] ==
1119 bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
1120 return 1;
1121 else
1122 return 0; 1120 return 0;
1121 if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
1122 return 0;
1123 if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
1124 return 0;
1125 if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
1126 return 0;
1127 if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
1128 return 0;
1129 return 1;
1130}
1131
1132static bool
1133lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1134{
1135 return (curr_vlan_id == new_vlan_id);
1123} 1136}
1124 1137
1125/** 1138/**
1126 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. 1139 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1127 * @phba: pointer to lpfc hba data structure. 1140 * @fcf: pointer to driver fcf record.
1128 * @new_fcf_record: pointer to fcf record. 1141 * @new_fcf_record: pointer to fcf record.
1129 * 1142 *
1130 * This routine copies the FCF information from the FCF 1143 * This routine copies the FCF information from the FCF
1131 * record to lpfc_hba data structure. 1144 * record to lpfc_hba data structure.
1132 **/ 1145 **/
1133static void 1146static void
1134lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) 1147lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
1148 struct fcf_record *new_fcf_record)
1135{ 1149{
1136 phba->fcf.fabric_name[0] = 1150 /* Fabric name */
1151 fcf_rec->fabric_name[0] =
1137 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); 1152 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1138 phba->fcf.fabric_name[1] = 1153 fcf_rec->fabric_name[1] =
1139 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); 1154 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1140 phba->fcf.fabric_name[2] = 1155 fcf_rec->fabric_name[2] =
1141 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); 1156 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1142 phba->fcf.fabric_name[3] = 1157 fcf_rec->fabric_name[3] =
1143 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); 1158 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1144 phba->fcf.fabric_name[4] = 1159 fcf_rec->fabric_name[4] =
1145 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); 1160 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1146 phba->fcf.fabric_name[5] = 1161 fcf_rec->fabric_name[5] =
1147 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); 1162 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1148 phba->fcf.fabric_name[6] = 1163 fcf_rec->fabric_name[6] =
1149 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); 1164 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1150 phba->fcf.fabric_name[7] = 1165 fcf_rec->fabric_name[7] =
1151 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); 1166 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1152 phba->fcf.mac_addr[0] = 1167 /* Mac address */
1153 bf_get(lpfc_fcf_record_mac_0, new_fcf_record); 1168 fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1154 phba->fcf.mac_addr[1] = 1169 fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1155 bf_get(lpfc_fcf_record_mac_1, new_fcf_record); 1170 fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1156 phba->fcf.mac_addr[2] = 1171 fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1157 bf_get(lpfc_fcf_record_mac_2, new_fcf_record); 1172 fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1158 phba->fcf.mac_addr[3] = 1173 fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1159 bf_get(lpfc_fcf_record_mac_3, new_fcf_record); 1174 /* FCF record index */
1160 phba->fcf.mac_addr[4] = 1175 fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1161 bf_get(lpfc_fcf_record_mac_4, new_fcf_record); 1176 /* FCF record priority */
1162 phba->fcf.mac_addr[5] = 1177 fcf_rec->priority = new_fcf_record->fip_priority;
1163 bf_get(lpfc_fcf_record_mac_5, new_fcf_record); 1178 /* Switch name */
1164 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1179 fcf_rec->switch_name[0] =
1165 phba->fcf.priority = new_fcf_record->fip_priority;
1166 phba->fcf.switch_name[0] =
1167 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); 1180 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
1168 phba->fcf.switch_name[1] = 1181 fcf_rec->switch_name[1] =
1169 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); 1182 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
1170 phba->fcf.switch_name[2] = 1183 fcf_rec->switch_name[2] =
1171 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); 1184 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
1172 phba->fcf.switch_name[3] = 1185 fcf_rec->switch_name[3] =
1173 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); 1186 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
1174 phba->fcf.switch_name[4] = 1187 fcf_rec->switch_name[4] =
1175 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); 1188 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
1176 phba->fcf.switch_name[5] = 1189 fcf_rec->switch_name[5] =
1177 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); 1190 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
1178 phba->fcf.switch_name[6] = 1191 fcf_rec->switch_name[6] =
1179 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); 1192 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
1180 phba->fcf.switch_name[7] = 1193 fcf_rec->switch_name[7] =
1181 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); 1194 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
1182} 1195}
1183 1196
1184/** 1197/**
1198 * lpfc_update_fcf_record - Update driver fcf record
1199 * @phba: pointer to lpfc hba data structure.
1200 * @fcf_rec: pointer to driver fcf record.
1201 * @new_fcf_record: pointer to hba fcf record.
1202 * @addr_mode: address mode to be set to the driver fcf record.
1203 * @vlan_id: vlan tag to be set to the driver fcf record.
1204 * @flag: flag bits to be set to the driver fcf record.
1205 *
1206 * This routine updates the driver FCF record from the new HBA FCF record
1207 * together with the address mode, vlan_id, and other information. This
1208 * routine is called with the host lock held.
1209 **/
1210static void
1211__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1212 struct fcf_record *new_fcf_record, uint32_t addr_mode,
1213 uint16_t vlan_id, uint32_t flag)
1214{
1215 /* Copy the fields from the HBA's FCF record */
1216 lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
1217 /* Update other fields of driver FCF record */
1218 fcf_rec->addr_mode = addr_mode;
1219 fcf_rec->vlan_id = vlan_id;
1220 fcf_rec->flag |= (flag | RECORD_VALID);
1221}
1222
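
Note that __lpfc_update_fcf_record() documents a "host lock held" contract rather than taking the lock itself. A sketch of a conforming caller, mirroring the round robin failover path later in this patch (the surrounding variable setup is assumed):

	/* Callers own the locking: update the failover candidate record
	 * under hbalock, per the contract stated in the comment above.
	 */
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);
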
1223/**
1185 * lpfc_register_fcf - Register the FCF with hba. 1224 * lpfc_register_fcf - Register the FCF with hba.
1186 * @phba: pointer to lpfc hba data structure. 1225 * @phba: pointer to lpfc hba data structure.
1187 * 1226 *
@@ -1199,13 +1238,14 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1199 1238
1200 /* If the FCF is not available do nothing. */ 1239
1201 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1240 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1241 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1202 spin_unlock_irqrestore(&phba->hbalock, flags); 1242 spin_unlock_irqrestore(&phba->hbalock, flags);
1203 return; 1243 return;
1204 } 1244 }
1205 1245
1206 /* The FCF is already registered, start discovery */ 1246 /* The FCF is already registered, start discovery */
1207 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1247 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1208 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1248 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1209 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1249 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1210 spin_unlock_irqrestore(&phba->hbalock, flags); 1250 spin_unlock_irqrestore(&phba->hbalock, flags);
1211 if (phba->pport->port_state != LPFC_FLOGI) 1251 if (phba->pport->port_state != LPFC_FLOGI)
@@ -1216,15 +1256,23 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1216 1256
1217 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1257 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1218 GFP_KERNEL); 1258 GFP_KERNEL);
1219 if (!fcf_mbxq) 1259 if (!fcf_mbxq) {
1260 spin_lock_irqsave(&phba->hbalock, flags);
1261 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1262 spin_unlock_irqrestore(&phba->hbalock, flags);
1220 return; 1263 return;
1264 }
1221 1265
1222 lpfc_reg_fcfi(phba, fcf_mbxq); 1266 lpfc_reg_fcfi(phba, fcf_mbxq);
1223 fcf_mbxq->vport = phba->pport; 1267 fcf_mbxq->vport = phba->pport;
1224 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; 1268 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1225 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1269 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1226 if (rc == MBX_NOT_FINISHED) 1270 if (rc == MBX_NOT_FINISHED) {
1271 spin_lock_irqsave(&phba->hbalock, flags);
1272 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1273 spin_unlock_irqrestore(&phba->hbalock, flags);
1227 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1274 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1275 }
1228 1276
1229 return; 1277 return;
1230} 1278}
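
Each of the three new failure paths in lpfc_register_fcf() clears FCF_DISC_INPROGRESS under hbalock before bailing out. A hypothetical helper that would centralize the pattern (the name lpfc_fcf_disc_failed is an assumption, not part of this patch):

	static void
	lpfc_fcf_disc_failed(struct lpfc_hba *phba)
	{
		unsigned long flags;

		/* Discovery cannot proceed; clear the in-progress flag so
		 * the next FCF event can restart the scan.
		 */
		spin_lock_irqsave(&phba->hbalock, flags);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
	}
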
@@ -1235,6 +1283,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1235 * @new_fcf_record: pointer to fcf record. 1283 * @new_fcf_record: pointer to fcf record.
1236 * @boot_flag: Indicates if this record used by boot bios. 1284 * @boot_flag: Indicates if this record used by boot bios.
1237 * @addr_mode: The address mode to be used by this FCF 1285 * @addr_mode: The address mode to be used by this FCF
1286 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
1238 * 1287 *
1239 * This routine compare the fcf record with connect list obtained from the 1288 * This routine compare the fcf record with connect list obtained from the
1240 * config region to decide if this FCF can be used for SAN discovery. It returns 1289 * config region to decide if this FCF can be used for SAN discovery. It returns
@@ -1253,13 +1302,27 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1253 uint16_t *vlan_id) 1302 uint16_t *vlan_id)
1254{ 1303{
1255 struct lpfc_fcf_conn_entry *conn_entry; 1304 struct lpfc_fcf_conn_entry *conn_entry;
1305 int i, j, fcf_vlan_id = 0;
1306
1307 /* Find the lowest VLAN id in the FCF record */
1308 for (i = 0; i < 512; i++) {
1309 if (new_fcf_record->vlan_bitmap[i]) {
1310 fcf_vlan_id = i * 8;
1311 j = 0;
1312 while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
1313 j++;
1314 fcf_vlan_id++;
1315 }
1316 break;
1317 }
1318 }
1256 1319
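
The open-coded bit walk above finds the lowest set bit of a 512-byte (4096-VLAN) bitmap, mapping byte i, bit j to VLAN id i*8 + j. An equivalent sketch using the ffs() helper under the same mapping (shown for illustration only; the patch keeps the explicit loop):

	int i, fcf_vlan_id = 0;

	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			/* ffs() is 1-based: bit 0 set gives ffs() == 1 */
			fcf_vlan_id = i * 8 +
				ffs(new_fcf_record->vlan_bitmap[i]) - 1;
			break;
		}
	}
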
1257 /* If FCF not available return 0 */ 1320 /* If FCF not available return 0 */
1258 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || 1321 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1259 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) 1322 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
1260 return 0; 1323 return 0;
1261 1324
1262 if (!phba->cfg_enable_fip) { 1325 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1263 *boot_flag = 0; 1326 *boot_flag = 0;
1264 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1327 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1265 new_fcf_record); 1328 new_fcf_record);
@@ -1286,11 +1349,16 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1286 if (*addr_mode & LPFC_FCF_FPMA) 1349 if (*addr_mode & LPFC_FCF_FPMA)
1287 *addr_mode = LPFC_FCF_FPMA; 1350 *addr_mode = LPFC_FCF_FPMA;
1288 1351
1289 *vlan_id = 0xFFFF; 1352 /* If the FCF record reports a vlan id, use that vlan id */
1353 if (fcf_vlan_id)
1354 *vlan_id = fcf_vlan_id;
1355 else
1356 *vlan_id = 0xFFFF;
1290 return 1; 1357 return 1;
1291 } 1358 }
1292 1359
1293 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { 1360 list_for_each_entry(conn_entry,
1361 &phba->fcf_conn_rec_list, list) {
1294 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) 1362 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1295 continue; 1363 continue;
1296 1364
@@ -1384,8 +1452,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1384 (*addr_mode & LPFC_FCF_FPMA)) 1452 (*addr_mode & LPFC_FCF_FPMA))
1385 *addr_mode = LPFC_FCF_FPMA; 1453 *addr_mode = LPFC_FCF_FPMA;
1386 1454
1455 /* If matching connect list has a vlan id, use it */
1387 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) 1456 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1388 *vlan_id = conn_entry->conn_rec.vlan_tag; 1457 *vlan_id = conn_entry->conn_rec.vlan_tag;
1458 /*
1459 * If no vlan id is specified in connect list, use the vlan id
1460 * in the FCF record
1461 */
1462 else if (fcf_vlan_id)
1463 *vlan_id = fcf_vlan_id;
1389 else 1464 else
1390 *vlan_id = 0xFFFF; 1465 *vlan_id = 0xFFFF;
1391 1466
@@ -1407,8 +1482,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1407int 1482int
1408lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) 1483lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1409{ 1484{
1410 LPFC_MBOXQ_t *mbox;
1411 int rc;
1412 /* 1485 /*
1413 * If the Link is up and no FCoE events while in the 1486 * If the Link is up and no FCoE events while in the
1414 * FCF discovery, no need to restart FCF discovery. 1487 * FCF discovery, no need to restart FCF discovery.
@@ -1417,75 +1490,70 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1417 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1490 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1418 return 0; 1491 return 0;
1419 1492
1493 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1494 "2768 Pending link or FCF event during current "
1495 "handling of the previous event: link_state:x%x, "
1496 "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
1497 phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
1498 phba->fcoe_eventtag);
1499
1420 spin_lock_irq(&phba->hbalock); 1500 spin_lock_irq(&phba->hbalock);
1421 phba->fcf.fcf_flag &= ~FCF_AVAILABLE; 1501 phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
1422 spin_unlock_irq(&phba->hbalock); 1502 spin_unlock_irq(&phba->hbalock);
1423 1503
1424 if (phba->link_state >= LPFC_LINK_UP) 1504 if (phba->link_state >= LPFC_LINK_UP) {
1425 lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 1505 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1506 "2780 Restart FCF table scan due to "
1507 "pending FCF event:evt_tag_at_scan:x%x, "
1508 "evt_tag_current:x%x\n",
1509 phba->fcoe_eventtag_at_fcf_scan,
1510 phba->fcoe_eventtag);
1511 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
1512 } else {
1513 /*
1514 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
1515 * flag
1516 */
1517 spin_lock_irq(&phba->hbalock);
1518 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1519 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
1520 spin_unlock_irq(&phba->hbalock);
1521 }
1426 1522
1523 /* Unregister the currently registered FCF if required */
1427 if (unreg_fcf) { 1524 if (unreg_fcf) {
1428 spin_lock_irq(&phba->hbalock); 1525 spin_lock_irq(&phba->hbalock);
1429 phba->fcf.fcf_flag &= ~FCF_REGISTERED; 1526 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
1430 spin_unlock_irq(&phba->hbalock); 1527 spin_unlock_irq(&phba->hbalock);
1431 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1528 lpfc_sli4_unregister_fcf(phba);
1432 if (!mbox) {
1433 lpfc_printf_log(phba, KERN_ERR,
1434 LOG_DISCOVERY|LOG_MBOX,
1435 "2610 UNREG_FCFI mbox allocation failed\n");
1436 return 1;
1437 }
1438 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
1439 mbox->vport = phba->pport;
1440 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
1441 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1442 if (rc == MBX_NOT_FINISHED) {
1443 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
1444 "2611 UNREG_FCFI issue mbox failed\n");
1445 mempool_free(mbox, phba->mbox_mem_pool);
1446 }
1447 } 1529 }
1448
1449 return 1; 1530 return 1;
1450} 1531}
1451 1532
1452/** 1533/**
1453 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 1534 * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
1454 * @phba: pointer to lpfc hba data structure. 1535 * @phba: pointer to lpfc hba data structure.
1455 * @mboxq: pointer to mailbox object. 1536 * @mboxq: pointer to mailbox object.
1537 * @next_fcf_index: pointer to holder of next fcf index.
1456 * 1538 *
1457 * This function iterate through all the fcf records available in 1539 * This routine parses the non-embedded fcf mailbox command by performing the
1458 * HBA and choose the optimal FCF record for discovery. After finding 1540 * necessary error checking, non-embedded read FCF record mailbox command
1459 * the FCF for discovery it register the FCF record and kick start 1541 * SGE parsing, and endianness swapping.
1460 * discovery. 1542 *
1461 * If FCF_IN_USE flag is set in currently used FCF, the routine try to 1543 * Returns the pointer to the new FCF record in the non-embedded mailbox
1462 * use a FCF record which match fabric name and mac address of the 1544 * command DMA memory if successful, otherwise NULL.
1463 * currently used FCF record.
1464 * If the driver support only one FCF, it will try to use the FCF record
1465 * used by BOOT_BIOS.
1466 */ 1545 */
1467void 1546static struct fcf_record *
1468lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1547lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1548 uint16_t *next_fcf_index)
1469{ 1549{
1470 void *virt_addr; 1550 void *virt_addr;
1471 dma_addr_t phys_addr; 1551 dma_addr_t phys_addr;
1472 uint8_t *bytep;
1473 struct lpfc_mbx_sge sge; 1552 struct lpfc_mbx_sge sge;
1474 struct lpfc_mbx_read_fcf_tbl *read_fcf; 1553 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1475 uint32_t shdr_status, shdr_add_status; 1554 uint32_t shdr_status, shdr_add_status;
1476 union lpfc_sli4_cfg_shdr *shdr; 1555 union lpfc_sli4_cfg_shdr *shdr;
1477 struct fcf_record *new_fcf_record; 1556 struct fcf_record *new_fcf_record;
1478 int rc;
1479 uint32_t boot_flag, addr_mode;
1480 uint32_t next_fcf_index;
1481 unsigned long flags;
1482 uint16_t vlan_id;
1483
1484 /* If there is pending FCoE event restart FCF table scan */
1485 if (lpfc_check_pending_fcoe_event(phba, 0)) {
1486 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1487 return;
1488 }
1489 1557
1490 /* Get the first SGE entry from the non-embedded DMA memory. This 1558 /* Get the first SGE entry from the non-embedded DMA memory. This
1491 * routine only uses a single SGE. 1559 * routine only uses a single SGE.
@@ -1496,134 +1564,359 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1496 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1564 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1497 "2524 Failed to get the non-embedded SGE " 1565 "2524 Failed to get the non-embedded SGE "
1498 "virtual address\n"); 1566 "virtual address\n");
1499 goto out; 1567 return NULL;
1500 } 1568 }
1501 virt_addr = mboxq->sge_array->addr[0]; 1569 virt_addr = mboxq->sge_array->addr[0];
1502 1570
1503 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 1571 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1504 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1572 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1505 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 1573 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1506 &shdr->response);
1507 /*
1508 * The FCF Record was read and there is no reason for the driver
1509 * to maintain the FCF record data or memory. Instead, just need
1510 * to book keeping the FCFIs can be used.
1511 */
1512 if (shdr_status || shdr_add_status) { 1574 if (shdr_status || shdr_add_status) {
1513 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1575 if (shdr_status == STATUS_FCF_TABLE_EMPTY)
1514 "2521 READ_FCF_RECORD mailbox failed " 1576 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1515 "with status x%x add_status x%x, mbx\n", 1577 "2726 READ_FCF_RECORD Indicates empty "
1516 shdr_status, shdr_add_status); 1578 "FCF table.\n");
1517 goto out; 1579 else
1580 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1581 "2521 READ_FCF_RECORD mailbox failed "
1582 "with status x%x add_status x%x, "
1583 "mbx\n", shdr_status, shdr_add_status);
1584 return NULL;
1518 } 1585 }
1519 /* Interpreting the returned information of FCF records */ 1586
1587 /* Interpreting the returned information of the FCF record */
1520 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; 1588 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1521 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, 1589 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1522 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1590 sizeof(struct lpfc_mbx_read_fcf_tbl));
1523 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); 1591 *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1524
1525 new_fcf_record = (struct fcf_record *)(virt_addr + 1592 new_fcf_record = (struct fcf_record *)(virt_addr +
1526 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1593 sizeof(struct lpfc_mbx_read_fcf_tbl));
1527 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, 1594 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1528 sizeof(struct fcf_record)); 1595 sizeof(struct fcf_record));
1529 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1530 1596
1531 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, 1597 return new_fcf_record;
1532 &boot_flag, &addr_mode, 1598}
1533 &vlan_id); 1599
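
The two lpfc_sli_pcimem_bcopy() calls in the parse helper above pass the same pointer as source and destination, so the helper doubles as an in-place word-wise endianness fixup of SLI DMA memory. A sketch of that idiom (the real helper lives in lpfc_sli.c; the exact byte-order conversion shown is an assumption):

	/* Sketch: copy cnt bytes while converting each 32-bit word from
	 * wire order to CPU order; src == dest turns the copy into an
	 * in-place swap, which is what the callers above rely on.
	 */
	static void
	pcimem_bcopy_sketch(uint32_t *src, uint32_t *dest, uint32_t cnt)
	{
		uint32_t ldata;
		int i;

		for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
			ldata = le32_to_cpu(*src++);
			*dest++ = ldata;
		}
	}
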
1600/**
1601 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
1602 * @phba: pointer to lpfc hba data structure.
1603 * @fcf_record: pointer to the fcf record.
1604 * @vlan_id: the lowest vlan identifier associated with this fcf record.
1605 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
1606 *
1607 * This routine logs the detailed FCF record if the LOG_FIP logging is
1608 * enabled.
1609 **/
1610static void
1611lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1612 struct fcf_record *fcf_record,
1613 uint16_t vlan_id,
1614 uint16_t next_fcf_index)
1615{
1616 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1617 "2764 READ_FCF_RECORD:\n"
1618 "\tFCF_Index : x%x\n"
1619 "\tFCF_Avail : x%x\n"
1620 "\tFCF_Valid : x%x\n"
1621 "\tFIP_Priority : x%x\n"
1622 "\tMAC_Provider : x%x\n"
1623 "\tLowest VLANID : x%x\n"
1624 "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
1625 "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1626 "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1627 "\tNext_FCF_Index: x%x\n",
1628 bf_get(lpfc_fcf_record_fcf_index, fcf_record),
1629 bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
1630 bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
1631 fcf_record->fip_priority,
1632 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
1633 vlan_id,
1634 bf_get(lpfc_fcf_record_mac_0, fcf_record),
1635 bf_get(lpfc_fcf_record_mac_1, fcf_record),
1636 bf_get(lpfc_fcf_record_mac_2, fcf_record),
1637 bf_get(lpfc_fcf_record_mac_3, fcf_record),
1638 bf_get(lpfc_fcf_record_mac_4, fcf_record),
1639 bf_get(lpfc_fcf_record_mac_5, fcf_record),
1640 bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
1641 bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
1642 bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
1643 bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
1644 bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
1645 bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
1646 bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
1647 bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
1648 bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
1649 bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
1650 bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
1651 bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
1652 bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
1653 bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
1654 bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
1655 bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
1656 next_fcf_index);
1657}
1658
1659/**
1660 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1661 * @phba: pointer to lpfc hba data structure.
1662 * @mboxq: pointer to mailbox object.
1663 *
1664 * This function iterates through all the fcf records available in
1665 * the HBA and chooses the optimal FCF record for discovery. After finding
1666 * the FCF for discovery, it registers the FCF record and kick-starts
1667 * discovery.
1668 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
1669 * use an FCF record which matches fabric name and mac address of the
1670 * currently used FCF record.
1671 * If the driver supports only one FCF, it will try to use the FCF record
1672 * used by BOOT_BIOS.
1673 */
1674void
1675lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1676{
1677 struct fcf_record *new_fcf_record;
1678 uint32_t boot_flag, addr_mode;
1679 uint16_t fcf_index, next_fcf_index;
1680 struct lpfc_fcf_rec *fcf_rec = NULL;
1681 uint16_t vlan_id;
1682 int rc;
1683
1684 /* If there is pending FCoE event restart FCF table scan */
1685 if (lpfc_check_pending_fcoe_event(phba, 0)) {
1686 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1687 return;
1688 }
1689
1690 /* Parse the FCF record from the non-embedded mailbox command */
1691 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
1692 &next_fcf_index);
1693 if (!new_fcf_record) {
1694 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1695 "2765 Mailbox command READ_FCF_RECORD "
1696 "failed to retrieve a FCF record.\n");
1697 /* Let next new FCF event trigger fast failover */
1698 spin_lock_irq(&phba->hbalock);
1699 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1700 spin_unlock_irq(&phba->hbalock);
1701 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1702 return;
1703 }
1704
1705 /* Check the FCF record against the connection list */
1706 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
1707 &addr_mode, &vlan_id);
1708
1709 /* Log the FCF record information if turned on */
1710 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
1711 next_fcf_index);
1712
1534 /* 1713 /*
1535 * If the fcf record does not match with connect list entries 1714 * If the fcf record does not match the connect list entries,
1536 * read the next entry. 1715 * read the next entry; otherwise, this is an eligible FCF
1716 * record for round robin FCF failover.
1537 */ 1717 */
1538 if (!rc) 1718 if (!rc) {
1719 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1720 "2781 FCF record fcf_index:x%x failed FCF "
1721 "connection list check, fcf_avail:x%x, "
1722 "fcf_valid:x%x\n",
1723 bf_get(lpfc_fcf_record_fcf_index,
1724 new_fcf_record),
1725 bf_get(lpfc_fcf_record_fcf_avail,
1726 new_fcf_record),
1727 bf_get(lpfc_fcf_record_fcf_valid,
1728 new_fcf_record));
1539 goto read_next_fcf; 1729 goto read_next_fcf;
1730 } else {
1731 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1732 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
1733 if (rc)
1734 goto read_next_fcf;
1735 }
1736
1540 /* 1737 /*
1541 * If this is not the first FCF discovery of the HBA, use last 1738 * If this is not the first FCF discovery of the HBA, use last
1542 * FCF record for the discovery. 1739 * FCF record for the discovery. A rescan matches the in-use
1740 * FCF record when the fabric name, switch name, mac address,
1741 * and vlan_id all match.
1543 */ 1742 */
1544 spin_lock_irqsave(&phba->hbalock, flags); 1743 spin_lock_irq(&phba->hbalock);
1545 if (phba->fcf.fcf_flag & FCF_IN_USE) { 1744 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1546 if (lpfc_fab_name_match(phba->fcf.fabric_name, 1745 if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
1746 new_fcf_record) &&
1747 lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
1547 new_fcf_record) && 1748 new_fcf_record) &&
1548 lpfc_sw_name_match(phba->fcf.switch_name, 1749 lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
1549 new_fcf_record) && 1750 new_fcf_record) &&
1550 lpfc_mac_addr_match(phba, new_fcf_record)) { 1751 lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
1752 vlan_id)) {
1551 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1753 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1552 spin_unlock_irqrestore(&phba->hbalock, flags); 1754 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
1755 /* Stop FCF redisc wait timer if pending */
1756 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
1757 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
1758 /* If in fast failover, mark it's completed */
1759 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
1760 FCF_DISCOVERY);
1761 spin_unlock_irq(&phba->hbalock);
1553 goto out; 1762 goto out;
1554 } 1763 }
1555 spin_unlock_irqrestore(&phba->hbalock, flags); 1764 /*
1556 goto read_next_fcf; 1765 * Read the next FCF record from the HBA, searching for a match
1766 * with the in-use record, only when not in the fast failover
1767 * period. During the fast failover period, instead determine
1768 * whether the FCF record just read should be the next
1769 * candidate.
1770 */
1771 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
1772 spin_unlock_irq(&phba->hbalock);
1773 goto read_next_fcf;
1774 }
1557 } 1775 }
1776 /*
1777 * Update the failover FCF record only while in the FCF fast-failover
1778 * period; otherwise, update the current FCF record.
1779 */
1780 if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
1781 fcf_rec = &phba->fcf.failover_rec;
1782 else
1783 fcf_rec = &phba->fcf.current_rec;
1784
1558 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { 1785 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1559 /* 1786 /*
1560 * If the current FCF record does not have boot flag 1787 * If the driver FCF record does not have boot flag
1561 * set and new fcf record has boot flag set, use the 1788 * set and new hba fcf record has boot flag set, use
1562 * new fcf record. 1789 * the new hba fcf record.
1563 */ 1790 */
1564 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { 1791 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
1565 /* Use this FCF record */ 1792 /* Choose this FCF record */
1566 lpfc_copy_fcf_record(phba, new_fcf_record); 1793 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1567 phba->fcf.addr_mode = addr_mode; 1794 addr_mode, vlan_id, BOOT_ENABLE);
1568 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; 1795 spin_unlock_irq(&phba->hbalock);
1569 if (vlan_id != 0xFFFF) {
1570 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1571 phba->fcf.vlan_id = vlan_id;
1572 }
1573 spin_unlock_irqrestore(&phba->hbalock, flags);
1574 goto read_next_fcf; 1796 goto read_next_fcf;
1575 } 1797 }
1576 /* 1798 /*
1577 * If the current FCF record has boot flag set and the 1799 * If the driver FCF record has boot flag set and the
1578 * new FCF record does not have boot flag, read the next 1800 * new hba FCF record does not have boot flag, read
1579 * FCF record. 1801 * the next FCF record.
1580 */ 1802 */
1581 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { 1803 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
1582 spin_unlock_irqrestore(&phba->hbalock, flags); 1804 spin_unlock_irq(&phba->hbalock);
1583 goto read_next_fcf; 1805 goto read_next_fcf;
1584 } 1806 }
1585 /* 1807 /*
1586 * If there is a record with lower priority value for 1808 * If the new hba FCF record has lower priority value
1587 * the current FCF, use that record. 1809 * than the driver FCF record, use the new record.
1588 */ 1810 */
1589 if (lpfc_fab_name_match(phba->fcf.fabric_name, 1811 if (new_fcf_record->fip_priority < fcf_rec->priority) {
1590 new_fcf_record) && 1812 /* Choose this FCF record */
1591 (new_fcf_record->fip_priority < phba->fcf.priority)) { 1813 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1592 /* Use this FCF record */ 1814 addr_mode, vlan_id, 0);
1593 lpfc_copy_fcf_record(phba, new_fcf_record);
1594 phba->fcf.addr_mode = addr_mode;
1595 if (vlan_id != 0xFFFF) {
1596 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1597 phba->fcf.vlan_id = vlan_id;
1598 }
1599 spin_unlock_irqrestore(&phba->hbalock, flags);
1600 goto read_next_fcf;
1601 } 1815 }
1602 spin_unlock_irqrestore(&phba->hbalock, flags); 1816 spin_unlock_irq(&phba->hbalock);
1603 goto read_next_fcf; 1817 goto read_next_fcf;
1604 } 1818 }
1605 /* 1819 /*
1606 * This is the first available FCF record, use this 1820 * This is the first suitable FCF record, choose it as the
1607 * record. 1821 * initial best-fit FCF.
1608 */ 1822 */
1609 lpfc_copy_fcf_record(phba, new_fcf_record); 1823 if (fcf_rec) {
1610 phba->fcf.addr_mode = addr_mode; 1824 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1611 if (boot_flag) 1825 addr_mode, vlan_id, (boot_flag ?
1612 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; 1826 BOOT_ENABLE : 0));
1613 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1827 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1614 if (vlan_id != 0xFFFF) {
1615 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1616 phba->fcf.vlan_id = vlan_id;
1617 } 1828 }
1618 spin_unlock_irqrestore(&phba->hbalock, flags); 1829 spin_unlock_irq(&phba->hbalock);
1619 goto read_next_fcf; 1830 goto read_next_fcf;
1620 1831
1621read_next_fcf: 1832read_next_fcf:
1622 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1833 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1623 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) 1834 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
1624 lpfc_register_fcf(phba); 1835 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
1625 else 1836 /*
1626 lpfc_sli4_read_fcf_record(phba, next_fcf_index); 1837 * Case of FCF fast failover scan
1838 */
1839
1840 /*
1841 * If the scan has not found any suitable FCF record, cancel
1842 * the in-progress FCF scan and do nothing
1843 */
1844 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
1845 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1846 "2782 No suitable FCF record "
1847 "found during this round of "
1848 "post FCF rediscovery scan: "
1849 "fcf_evt_tag:x%x, fcf_index: "
1850 "x%x\n",
1851 phba->fcoe_eventtag_at_fcf_scan,
1852 bf_get(lpfc_fcf_record_fcf_index,
1853 new_fcf_record));
1854 /*
1855 * Let next new FCF event trigger fast
1856 * failover
1857 */
1858 spin_lock_irq(&phba->hbalock);
1859 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1860 spin_unlock_irq(&phba->hbalock);
1861 return;
1862 }
1863 /*
1864 * It has found a suitable FCF record that is not
1865 * the same as in-use FCF record, unregister the
1866 * in-use FCF record, replace the in-use FCF record
1867 * with the new FCF record, mark FCF fast failover
1868 * completed, and then register the new FCF
1869 * record.
1870 */
1871
1872 /* Unregister the current in-use FCF record */
1873 lpfc_unregister_fcf(phba);
1874
1875 /* Replace in-use record with the new record */
1876 memcpy(&phba->fcf.current_rec,
1877 &phba->fcf.failover_rec,
1878 sizeof(struct lpfc_fcf_rec));
1879 /* mark the FCF fast failover completed */
1880 spin_lock_irq(&phba->hbalock);
1881 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
1882 spin_unlock_irq(&phba->hbalock);
1883 /*
1884 * Set up the initial registered FCF index for FLOGI
1885 * round robin FCF failover.
1886 */
1887 phba->fcf.fcf_rr_init_indx =
1888 phba->fcf.failover_rec.fcf_indx;
1889 /* Register to the new FCF record */
1890 lpfc_register_fcf(phba);
1891 } else {
1892 /*
1893 * In case of transaction period to fast FCF failover,
1894 * do nothing when search to the end of the FCF table.
1895 */
1896 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
1897 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
1898 return;
1899 /*
1900 * Otherwise, initial scan or post linkdown rescan,
1901 * register with the best FCF record found so far
1902 * through the FCF scanning process.
1903 */
1904
1905 /* mark the initial FCF discovery completed */
1906 spin_lock_irq(&phba->hbalock);
1907 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
1908 spin_unlock_irq(&phba->hbalock);
1909 /*
1910 * Set up the initial registered FCF index for FLOGI
1911 * round robin FCF failover
1912 */
1913 phba->fcf.fcf_rr_init_indx =
1914 phba->fcf.current_rec.fcf_indx;
1915 /* Register to the new FCF record */
1916 lpfc_register_fcf(phba);
1917 }
1918 } else
1919 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
1627 return; 1920 return;
1628 1921
1629out: 1922out:
@@ -1634,16 +1927,154 @@ out:
1634} 1927}
1635 1928
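
The eligible-record bookkeeping above calls lpfc_sli4_fcf_rr_index_set(), whose body falls outside this hunk. A plausible sketch, assuming a driver-private bitmask phba->fcf.fcf_rr_bmask sized to the FCF table (both the field name and the bound below are assumptions here):

	int
	lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
	{
		/* Reject indexes beyond the FCF table size */
		if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return -EINVAL;
		/* Mark this index eligible for round robin failover */
		set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
		return 0;
	}
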
1636/** 1929/**
1930 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
1931 * @phba: pointer to lpfc hba data structure.
1932 * @mboxq: pointer to mailbox object.
1933 *
1934 * This is the callback function for the FLOGI failure round robin FCF
1935 * failover read FCF record mailbox command, issued against the eligible FCF
1936 * record bmask to perform the failover. If the FCF read back is not
1937 * valid/available, it falls back to retrying FLOGI to the currently
1938 * registered FCF again. Otherwise, if the FCF read back is valid and
1939 * available, it will copy the newly read FCF record into the failover FCF
1940 * record, unregister the currently registered FCF record, copy the failover
1941 * FCF record to the current FCF record, and then register the current FCF
1942 * record before retrying FLOGI on the new failover FCF.
1943 */
1944void
1945lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1946{
1947 struct fcf_record *new_fcf_record;
1948 uint32_t boot_flag, addr_mode;
1949 uint16_t next_fcf_index;
1950 uint16_t current_fcf_index;
1951 uint16_t vlan_id;
1952
1953 /* If link state is not up, stop the round robin failover process */
1954 if (phba->link_state < LPFC_LINK_UP) {
1955 spin_lock_irq(&phba->hbalock);
1956 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1957 spin_unlock_irq(&phba->hbalock);
1958 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1959 return;
1960 }
1961
1962 /* Parse the FCF record from the non-embedded mailbox command */
1963 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
1964 &next_fcf_index);
1965 if (!new_fcf_record) {
1966 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1967 "2766 Mailbox command READ_FCF_RECORD "
1968 "failed to retrieve a FCF record.\n");
1969 goto out;
1970 }
1971
1972 /* Get the needed parameters from FCF record */
1973 lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
1974 &addr_mode, &vlan_id);
1975
1976 /* Log the FCF record information if turned on */
1977 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
1978 next_fcf_index);
1979
1980 /* Upload new FCF record to the failover FCF record */
1981 spin_lock_irq(&phba->hbalock);
1982 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
1983 new_fcf_record, addr_mode, vlan_id,
1984 (boot_flag ? BOOT_ENABLE : 0));
1985 spin_unlock_irq(&phba->hbalock);
1986
1987 current_fcf_index = phba->fcf.current_rec.fcf_indx;
1988
1989 /* Unregister the current in-use FCF record */
1990 lpfc_unregister_fcf(phba);
1991
1992 /* Replace in-use record with the new record */
1993 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
1994 sizeof(struct lpfc_fcf_rec));
1995
1996 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1997 "2783 FLOGI round robin FCF failover from FCF "
1998 "(index:x%x) to FCF (index:x%x).\n",
1999 current_fcf_index,
2000 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2001
2002out:
2003 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2004 lpfc_register_fcf(phba);
2005}
2006
2007/**
2008 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2009 * @phba: pointer to lpfc hba data structure.
2010 * @mboxq: pointer to mailbox object.
2011 *
2012 * This is the callback function of read FCF record mailbox command for
2013 * updating the eligible FCF bmask for FLOGI failure round robin FCF
2014 * failover when a new FCF event happened. If the FCF read back is
2015 * valid/available and it passes the connection list check, it updates
2016 * the bmask for the eligible FCF record for round robin failover.
2017 */
2018void
2019lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2020{
2021 struct fcf_record *new_fcf_record;
2022 uint32_t boot_flag, addr_mode;
2023 uint16_t fcf_index, next_fcf_index;
2024 uint16_t vlan_id;
2025 int rc;
2026
2027 /* If link state is not up, no need to proceed */
2028 if (phba->link_state < LPFC_LINK_UP)
2029 goto out;
2030
2031 /* If FCF discovery period is over, no need to proceed */
2032 if (phba->fcf.fcf_flag & FCF_DISCOVERY)
2033 goto out;
2034
2035 /* Parse the FCF record from the non-embedded mailbox command */
2036 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2037 &next_fcf_index);
2038 if (!new_fcf_record) {
2039 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2040 "2767 Mailbox command READ_FCF_RECORD "
2041 "failed to retrieve a FCF record.\n");
2042 goto out;
2043 }
2044
2045 /* Check the connection list for eligibility */
2046 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2047 &addr_mode, &vlan_id);
2048
2049 /* Log the FCF record information if turned on */
2050 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2051 next_fcf_index);
2052
2053 if (!rc)
2054 goto out;
2055
2056 /* Update the eligible FCF record index bmask */
2057 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2058 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
2059
2060out:
2061 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2062}
2063
2064/**
1637 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. 2065 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
1638 * @phba: pointer to lpfc hba data structure. 2066 * @phba: pointer to lpfc hba data structure.
1639 * @mboxq: pointer to mailbox data structure. 2067 * @mboxq: pointer to mailbox data structure.
1640 * 2068 *
1641 * This function handles completion of init vpi mailbox command. 2069 * This function handles completion of init vpi mailbox command.
1642 */ 2070 */
1643static void 2071void
1644lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2072lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1645{ 2073{
1646 struct lpfc_vport *vport = mboxq->vport; 2074 struct lpfc_vport *vport = mboxq->vport;
2075 struct lpfc_nodelist *ndlp;
2076 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2077
1647 if (mboxq->u.mb.mbxStatus) { 2078 if (mboxq->u.mb.mbxStatus) {
1648 lpfc_printf_vlog(vport, KERN_ERR, 2079 lpfc_printf_vlog(vport, KERN_ERR,
1649 LOG_MBOX, 2080 LOG_MBOX,
@@ -1653,20 +2084,67 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1653 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2084 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1654 return; 2085 return;
1655 } 2086 }
2087 spin_lock_irq(shost->host_lock);
1656 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 2088 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2089 spin_unlock_irq(shost->host_lock);
2090
2091 /* If this port is physical port or FDISC is done, do reg_vpi */
2092 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2093 ndlp = lpfc_findnode_did(vport, Fabric_DID);
2094 if (!ndlp)
2095 lpfc_printf_vlog(vport, KERN_ERR,
2096 LOG_DISCOVERY,
2097 "2731 Cannot find fabric "
2098 "controller node\n");
2099 else
2100 lpfc_register_new_vport(phba, vport, ndlp);
2101 mempool_free(mboxq, phba->mbox_mem_pool);
2102 return;
2103 }
1657 2104
1658 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 2105 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1659 lpfc_initial_fdisc(vport); 2106 lpfc_initial_fdisc(vport);
1660 else { 2107 else {
1661 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 2108 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
1662 lpfc_printf_vlog(vport, KERN_ERR, 2109 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1663 LOG_ELS, 2110 "2606 No NPIV Fabric support\n");
1664 "2606 No NPIV Fabric support\n");
1665 } 2111 }
2112 mempool_free(mboxq, phba->mbox_mem_pool);
1666 return; 2113 return;
1667} 2114}
1668 2115
1669/** 2116/**
2117 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
2118 * @vport: pointer to lpfc_vport data structure.
2119 *
2120 * This function issue a init_vpi mailbox command to initialize
2121 * VPI for the vport.
2122 */
2123void
2124lpfc_issue_init_vpi(struct lpfc_vport *vport)
2125{
2126 LPFC_MBOXQ_t *mboxq;
2127 int rc;
2128
2129 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2130 if (!mboxq) {
2131 lpfc_printf_vlog(vport, KERN_ERR,
2132 LOG_MBOX, "2607 Failed to allocate "
2133 "init_vpi mailbox\n");
2134 return;
2135 }
2136 lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2137 mboxq->vport = vport;
2138 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2139 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2140 if (rc == MBX_NOT_FINISHED) {
2141 lpfc_printf_vlog(vport, KERN_ERR,
2142 LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
2143 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2144 }
2145}
2146
2147/**
1670 * lpfc_start_fdiscs - send fdiscs for each vport on this port. 2148
1671 * @phba: pointer to lpfc hba data structure. 2149 * @phba: pointer to lpfc hba data structure.
1672 * 2150 *
@@ -1678,8 +2156,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
1678{ 2156{
1679 struct lpfc_vport **vports; 2157 struct lpfc_vport **vports;
1680 int i; 2158 int i;
1681 LPFC_MBOXQ_t *mboxq;
1682 int rc;
1683 2159
1684 vports = lpfc_create_vport_work_array(phba); 2160 vports = lpfc_create_vport_work_array(phba);
1685 if (vports != NULL) { 2161 if (vports != NULL) {
@@ -1698,26 +2174,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
1698 continue; 2174 continue;
1699 } 2175 }
1700 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { 2176 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
1701 mboxq = mempool_alloc(phba->mbox_mem_pool, 2177 lpfc_issue_init_vpi(vports[i]);
1702 GFP_KERNEL);
1703 if (!mboxq) {
1704 lpfc_printf_vlog(vports[i], KERN_ERR,
1705 LOG_MBOX, "2607 Failed to allocate "
1706 "init_vpi mailbox\n");
1707 continue;
1708 }
1709 lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
1710 mboxq->vport = vports[i];
1711 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
1712 rc = lpfc_sli_issue_mbox(phba, mboxq,
1713 MBX_NOWAIT);
1714 if (rc == MBX_NOT_FINISHED) {
1715 lpfc_printf_vlog(vports[i], KERN_ERR,
1716 LOG_MBOX, "2608 Failed to issue "
1717 "init_vpi mailbox\n");
1718 mempool_free(mboxq,
1719 phba->mbox_mem_pool);
1720 }
1721 continue; 2178 continue;
1722 } 2179 }
1723 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 2180 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
@@ -1740,6 +2197,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1740{ 2197{
1741 struct lpfc_dmabuf *dmabuf = mboxq->context1; 2198 struct lpfc_dmabuf *dmabuf = mboxq->context1;
1742 struct lpfc_vport *vport = mboxq->vport; 2199 struct lpfc_vport *vport = mboxq->vport;
2200 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1743 2201
1744 if (mboxq->u.mb.mbxStatus) { 2202 if (mboxq->u.mb.mbxStatus) {
1745 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 2203 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1756,8 +2214,12 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1756 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2214 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1757 goto fail_free_mem; 2215 goto fail_free_mem;
1758 } 2216 }
1759 /* Mark the vport has registered with its VFI */ 2217 /* The VPI is implicitly registered when the VFI is registered */
1760 vport->vfi_state |= LPFC_VFI_REGISTERED; 2218 spin_lock_irq(shost->host_lock);
2219 vport->vpi_state |= LPFC_VPI_REGISTERED;
2220 vport->fc_flag |= FC_VFI_REGISTERED;
2221 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2222 spin_unlock_irq(shost->host_lock);
1761 2223
1762 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2224 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1763 lpfc_start_fdiscs(phba); 2225 lpfc_start_fdiscs(phba);
@@ -1831,8 +2293,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1831 int rc; 2293 int rc;
1832 struct fcf_record *fcf_record; 2294 struct fcf_record *fcf_record;
1833 2295
1834 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1835
1836 spin_lock_irq(&phba->hbalock); 2296 spin_lock_irq(&phba->hbalock);
1837 switch (la->UlnkSpeed) { 2297 switch (la->UlnkSpeed) {
1838 case LA_1GHZ_LINK: 2298 case LA_1GHZ_LINK:
@@ -1861,7 +2321,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1861 if (phba->fc_topology == TOPOLOGY_LOOP) { 2321 if (phba->fc_topology == TOPOLOGY_LOOP) {
1862 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 2322 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
1863 2323
1864 if (phba->cfg_enable_npiv) 2324 /* if npiv is enabled and this adapter supports npiv, log
2325 * a message that npiv is not supported in this topology
2326 */
2327 if (phba->cfg_enable_npiv && phba->max_vpi)
1865 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 2328 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1866 "1309 Link Up Event npiv not supported in loop " 2329 "1309 Link Up Event npiv not supported in loop "
1867 "topology\n"); 2330 "topology\n");
@@ -1921,18 +2384,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1921 spin_unlock_irq(&phba->hbalock); 2384 spin_unlock_irq(&phba->hbalock);
1922 2385
1923 lpfc_linkup(phba); 2386 lpfc_linkup(phba);
1924 if (sparam_mbox) { 2387 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1925 lpfc_read_sparam(phba, sparam_mbox, 0); 2388 if (!sparam_mbox)
1926 sparam_mbox->vport = vport; 2389 goto out;
1927 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 2390
1928 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 2391 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
1929 if (rc == MBX_NOT_FINISHED) { 2392 if (rc) {
1930 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 2393 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1931 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2394 goto out;
1932 kfree(mp); 2395 }
1933 mempool_free(sparam_mbox, phba->mbox_mem_pool); 2396 sparam_mbox->vport = vport;
1934 goto out; 2397 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1935 } 2398 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
2399 if (rc == MBX_NOT_FINISHED) {
2400 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
2401 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2402 kfree(mp);
2403 mempool_free(sparam_mbox, phba->mbox_mem_pool);
2404 goto out;
1936 } 2405 }
1937 2406
1938 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { 2407 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
@@ -1955,7 +2424,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1955 * is phase 1 implementation that supports FCF index 0 and driver 2424
1956 * defaults. 2425 * defaults.
1957 */ 2426 */
1958 if (phba->cfg_enable_fip == 0) { 2427 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1959 fcf_record = kzalloc(sizeof(struct fcf_record), 2428 fcf_record = kzalloc(sizeof(struct fcf_record),
1960 GFP_KERNEL); 2429 GFP_KERNEL);
1961 if (unlikely(!fcf_record)) { 2430 if (unlikely(!fcf_record)) {
@@ -1990,11 +2459,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1990 spin_unlock_irq(&phba->hbalock); 2459 spin_unlock_irq(&phba->hbalock);
1991 return; 2460 return;
1992 } 2461 }
2462 /* This is the initial FCF discovery scan */
2463 phba->fcf.fcf_flag |= FCF_INIT_DISC;
1993 spin_unlock_irq(&phba->hbalock); 2464 spin_unlock_irq(&phba->hbalock);
1994 rc = lpfc_sli4_read_fcf_record(phba, 2465 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1995 LPFC_FCOE_FCF_GET_FIRST); 2466 "2778 Start FCF table scan at linkup\n");
1996 if (rc) 2467
2468 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2469 LPFC_FCOE_FCF_GET_FIRST);
2470 if (rc) {
2471 spin_lock_irq(&phba->hbalock);
2472 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
2473 spin_unlock_irq(&phba->hbalock);
1997 goto out; 2474 goto out;
2475 }
1998 } 2476 }
1999 2477
2000 return; 2478 return;
@@ -2080,11 +2558,14 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2080 } 2558 }
2081 2559
2082 phba->fc_eventTag = la->eventTag; 2560 phba->fc_eventTag = la->eventTag;
2561 spin_lock_irq(&phba->hbalock);
2083 if (la->mm) 2562 if (la->mm)
2084 phba->sli.sli_flag |= LPFC_MENLO_MAINT; 2563 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
2085 else 2564 else
2086 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 2565 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
2566 spin_unlock_irq(&phba->hbalock);
2087 2567
2568 phba->link_events++;
2088 if (la->attType == AT_LINK_UP && (!la->mm)) { 2569 if (la->attType == AT_LINK_UP && (!la->mm)) {
2089 phba->fc_stat.LinkUp++; 2570 phba->fc_stat.LinkUp++;
2090 if (phba->link_flag & LS_LOOPBACK_MODE) { 2571 if (phba->link_flag & LS_LOOPBACK_MODE) {
@@ -2211,13 +2692,17 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2211 mb->mbxStatus); 2692 mb->mbxStatus);
2212 break; 2693 break;
2213 } 2694 }
2695 spin_lock_irq(shost->host_lock);
2696 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
2697 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2698 spin_unlock_irq(shost->host_lock);
2214 vport->unreg_vpi_cmpl = VPORT_OK; 2699 vport->unreg_vpi_cmpl = VPORT_OK;
2215 mempool_free(pmb, phba->mbox_mem_pool); 2700 mempool_free(pmb, phba->mbox_mem_pool);
2216 /* 2701 /*
2217 * This shost reference might have been taken at the beginning of 2702 * This shost reference might have been taken at the beginning of
2218 * lpfc_vport_delete() 2703 * lpfc_vport_delete()
2219 */ 2704 */
2220 if (vport->load_flag & FC_UNLOADING) 2705 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
2221 scsi_host_put(shost); 2706 scsi_host_put(shost);
2222} 2707}
2223 2708
@@ -2268,6 +2753,10 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2268 goto out; 2753 goto out;
2269 } 2754 }
2270 2755
2756 spin_lock_irq(shost->host_lock);
2757 vport->vpi_state |= LPFC_VPI_REGISTERED;
2758 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2759 spin_unlock_irq(shost->host_lock);
2271 vport->num_disc_nodes = 0; 2760 vport->num_disc_nodes = 0;
2272 /* go thru NPR list and issue ELS PLOGIs */ 2761 /* go thru NPR list and issue ELS PLOGIs */
2273 if (vport->fc_npr_cnt) 2762 if (vport->fc_npr_cnt)
@@ -3077,7 +3566,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3077 struct lpfc_sli *psli; 3566 struct lpfc_sli *psli;
3078 struct lpfc_sli_ring *pring; 3567 struct lpfc_sli_ring *pring;
3079 struct lpfc_iocbq *iocb, *next_iocb; 3568 struct lpfc_iocbq *iocb, *next_iocb;
3080 uint32_t rpi, i; 3569 uint32_t i;
3081 3570
3082 lpfc_fabric_abort_nport(ndlp); 3571 lpfc_fabric_abort_nport(ndlp);
3083 3572
@@ -3086,7 +3575,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3086 * by firmware with a no rpi error. 3575 * by firmware with a no rpi error.
3087 */ 3576 */
3088 psli = &phba->sli; 3577 psli = &phba->sli;
3089 rpi = ndlp->nlp_rpi;
3090 if (ndlp->nlp_flag & NLP_RPI_VALID) { 3578 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3091 /* Now process each ring */ 3579 /* Now process each ring */
3092 for (i = 0; i < psli->num_rings; i++) { 3580 for (i = 0; i < psli->num_rings; i++) {
@@ -3154,6 +3642,38 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3154 return 0; 3642 return 0;
3155} 3643}
3156 3644
3645/**
3646 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
3647 * @phba: pointer to lpfc hba data structure.
3648 *
3649 * This routine is invoked to unregister all the currently registered RPIs
3650 * to the HBA.
3651 **/
3652void
3653lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
3654{
3655 struct lpfc_vport **vports;
3656 struct lpfc_nodelist *ndlp;
3657 struct Scsi_Host *shost;
3658 int i;
3659
3660 vports = lpfc_create_vport_work_array(phba);
3661 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3662 shost = lpfc_shost_from_vport(vports[i]);
3663 spin_lock_irq(shost->host_lock);
3664 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
3665 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3666 /* The mempool_alloc might sleep */
3667 spin_unlock_irq(shost->host_lock);
3668 lpfc_unreg_rpi(vports[i], ndlp);
3669 spin_lock_irq(shost->host_lock);
3670 }
3671 }
3672 spin_unlock_irq(shost->host_lock);
3673 }
3674 lpfc_destroy_vport_work_array(phba, vports);
3675}
3676
3157void 3677void
3158lpfc_unreg_all_rpis(struct lpfc_vport *vport) 3678lpfc_unreg_all_rpis(struct lpfc_vport *vport)
3159{ 3679{
@@ -4322,6 +4842,14 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
4322 ret = 1; 4842 ret = 1;
4323 spin_unlock_irq(shost->host_lock); 4843 spin_unlock_irq(shost->host_lock);
4324 goto out; 4844 goto out;
4845 } else {
4846 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
4847 "2624 RPI %x DID %x flg %x still "
4848 "logged in\n",
4849 ndlp->nlp_rpi, ndlp->nlp_DID,
4850 ndlp->nlp_flag);
4851 if (ndlp->nlp_flag & NLP_RPI_VALID)
4852 ret = 1;
4325 } 4853 }
4326 } 4854 }
4327 spin_unlock_irq(shost->host_lock); 4855 spin_unlock_irq(shost->host_lock);
@@ -4376,120 +4904,231 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4376} 4904}
4377 4905
4378/** 4906/**
4379 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. 4907 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
4380 * @phba: Pointer to hba context object. 4908 * @phba: Pointer to hba context object.
4381 * 4909 *
4382 * This function check if there are any connected remote port for the FCF and 4910 * This function prepares the HBA for unregistering the currently registered
4383 * if all the devices are disconnected, this function unregister FCFI. 4911 * FCF from the HBA. It unregisters, in order, the RPIs, VPIs, and
4384 * This function also tries to use another FCF for discovery. 4912 * VFIs.
4385 */ 4913 */
4386void 4914int
4387lpfc_unregister_unused_fcf(struct lpfc_hba *phba) 4915lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
4388{ 4916{
4389 LPFC_MBOXQ_t *mbox; 4917 LPFC_MBOXQ_t *mbox;
4390 int rc;
4391 struct lpfc_vport **vports; 4918 struct lpfc_vport **vports;
4392 int i; 4919 struct lpfc_nodelist *ndlp;
4393 4920 struct Scsi_Host *shost;
4394 spin_lock_irq(&phba->hbalock); 4921 int i, rc;
4395 /*
4396 * If HBA is not running in FIP mode or
4397 * If HBA does not support FCoE or
4398 * If FCF is not registered.
4399 * do nothing.
4400 */
4401 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4402 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4403 (phba->cfg_enable_fip == 0)) {
4404 spin_unlock_irq(&phba->hbalock);
4405 return;
4406 }
4407 spin_unlock_irq(&phba->hbalock);
4408 4922
4923 /* Unregister RPIs */
4409 if (lpfc_fcf_inuse(phba)) 4924 if (lpfc_fcf_inuse(phba))
4410 return; 4925 lpfc_unreg_hba_rpis(phba);
4411 4926
4927 /* At this point, all discovery is aborted */
4928 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4412 4929
4413 /* Unregister VPIs */ 4930 /* Unregister VPIs */
4414 vports = lpfc_create_vport_work_array(phba); 4931 vports = lpfc_create_vport_work_array(phba);
4415 if (vports && 4932 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4416 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4417 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4933 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4934 /* Stop FLOGI/FDISC retries */
4935 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
4936 if (ndlp)
4937 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
4418 lpfc_mbx_unreg_vpi(vports[i]); 4938 lpfc_mbx_unreg_vpi(vports[i]);
4419 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4939 shost = lpfc_shost_from_vport(vports[i]);
4420 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; 4940 spin_lock_irq(shost->host_lock);
4941 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
4942 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
4943 spin_unlock_irq(shost->host_lock);
4421 } 4944 }
4422 lpfc_destroy_vport_work_array(phba, vports); 4945 lpfc_destroy_vport_work_array(phba, vports);
4423 4946
4947 /* Cleanup any outstanding ELS commands */
4948 lpfc_els_flush_all_cmd(phba);
4949
4424 /* Unregister VFI */ 4950 /* Unregister VFI */
4425 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4951 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4426 if (!mbox) { 4952 if (!mbox) {
4427 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4953 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4428 "2556 UNREG_VFI mbox allocation failed" 4954 "2556 UNREG_VFI mbox allocation failed"
4429 "HBA state x%x\n", 4955 "HBA state x%x\n", phba->pport->port_state);
4430 phba->pport->port_state); 4956 return -ENOMEM;
4431 return;
4432 } 4957 }
4433 4958
4434 lpfc_unreg_vfi(mbox, phba->pport->vfi); 4959 lpfc_unreg_vfi(mbox, phba->pport);
4435 mbox->vport = phba->pport; 4960 mbox->vport = phba->pport;
4436 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl; 4961 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4437 4962
4438 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4963 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4439 if (rc == MBX_NOT_FINISHED) { 4964 if (rc == MBX_NOT_FINISHED) {
4440 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4965 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4441 "2557 UNREG_VFI issue mbox failed rc x%x " 4966 "2557 UNREG_VFI issue mbox failed rc x%x "
4442 "HBA state x%x\n", 4967 "HBA state x%x\n",
4443 rc, phba->pport->port_state); 4968 rc, phba->pport->port_state);
4444 mempool_free(mbox, phba->mbox_mem_pool); 4969 mempool_free(mbox, phba->mbox_mem_pool);
4445 return; 4970 return -EIO;
4446 } 4971 }
4447 4972
4448 /* Unregister FCF */ 4973 shost = lpfc_shost_from_vport(phba->pport);
4974 spin_lock_irq(shost->host_lock);
4975 phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
4976 spin_unlock_irq(shost->host_lock);
4977
4978 return 0;
4979}
4980
4981/**
4982 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
4983 * @phba: Pointer to hba context object.
4984 *
 4985 * This function issues a synchronous unregister FCF mailbox command to the HBA to
4986 * unregister the currently registered FCF record. The driver does not reset
4987 * the driver FCF usage state flags.
4988 *
 4989 * Return 0 if successfully issued, non-zero otherwise.
4990 */
4991int
4992lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
4993{
4994 LPFC_MBOXQ_t *mbox;
4995 int rc;
4996
4449 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4997 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4450 if (!mbox) { 4998 if (!mbox) {
4451 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4999 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4452 "2551 UNREG_FCFI mbox allocation failed" 5000 "2551 UNREG_FCFI mbox allocation failed"
4453 "HBA state x%x\n", 5001 "HBA state x%x\n", phba->pport->port_state);
4454 phba->pport->port_state); 5002 return -ENOMEM;
4455 return;
4456 } 5003 }
4457
4458 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); 5004 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4459 mbox->vport = phba->pport; 5005 mbox->vport = phba->pport;
4460 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; 5006 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4461 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5007 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4462 5008
4463 if (rc == MBX_NOT_FINISHED) { 5009 if (rc == MBX_NOT_FINISHED) {
4464 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5010 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4465 "2552 UNREG_FCFI issue mbox failed rc x%x " 5011 "2552 Unregister FCFI command failed rc x%x "
4466 "HBA state x%x\n", 5012 "HBA state x%x\n",
4467 rc, phba->pport->port_state); 5013 rc, phba->pport->port_state);
4468 mempool_free(mbox, phba->mbox_mem_pool); 5014 return -EINVAL;
5015 }
5016 return 0;
5017}
5018
5019/**
5020 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
5021 * @phba: Pointer to hba context object.
5022 *
 5023 * This function unregisters the currently registered FCF. This function
 5024 * also tries to find another FCF for discovery by rescanning the HBA FCF table.
5025 */
5026void
5027lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
5028{
5029 int rc;
5030
5031 /* Preparation for unregistering fcf */
5032 rc = lpfc_unregister_fcf_prep(phba);
5033 if (rc) {
5034 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
5035 "2748 Failed to prepare for unregistering "
5036 "HBA's FCF record: rc=%d\n", rc);
4469 return; 5037 return;
4470 } 5038 }
4471 5039
4472 spin_lock_irq(&phba->hbalock); 5040 /* Now, unregister FCF record and reset HBA FCF state */
4473 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED | 5041 rc = lpfc_sli4_unregister_fcf(phba);
4474 FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE | 5042 if (rc)
4475 FCF_VALID_VLAN); 5043 return;
4476 spin_unlock_irq(&phba->hbalock); 5044 /* Reset HBA FCF states after successful unregister FCF */
5045 phba->fcf.fcf_flag = 0;
5046 phba->fcf.current_rec.flag = 0;
4477 5047
4478 /* 5048 /*
4479 * If driver is not unloading, check if there is any other 5049 * If driver is not unloading, check if there is any other
4480 * FCF record that can be used for discovery. 5050 * FCF record that can be used for discovery.
4481 */ 5051 */
4482 if ((phba->pport->load_flag & FC_UNLOADING) || 5052 if ((phba->pport->load_flag & FC_UNLOADING) ||
4483 (phba->link_state < LPFC_LINK_UP)) 5053 (phba->link_state < LPFC_LINK_UP))
4484 return; 5054 return;
4485 5055
4486 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 5056 /* This is considered as the initial FCF discovery scan */
5057 spin_lock_irq(&phba->hbalock);
5058 phba->fcf.fcf_flag |= FCF_INIT_DISC;
5059 spin_unlock_irq(&phba->hbalock);
5060 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4487 5061
4488 if (rc) 5062 if (rc) {
5063 spin_lock_irq(&phba->hbalock);
5064 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
5065 spin_unlock_irq(&phba->hbalock);
4489 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5066 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4490 "2553 lpfc_unregister_unused_fcf failed to read FCF" 5067 "2553 lpfc_unregister_unused_fcf failed "
4491 " record HBA state x%x\n", 5068 "to read FCF record HBA state x%x\n",
4492 phba->pport->port_state); 5069 phba->pport->port_state);
5070 }
5071}
5072
5073/**
5074 * lpfc_unregister_fcf - Unregister the currently registered fcf record
5075 * @phba: Pointer to hba context object.
5076 *
 5077 * This function just unregisters the currently registered FCF. It does not
5078 * try to find another FCF for discovery.
5079 */
5080void
5081lpfc_unregister_fcf(struct lpfc_hba *phba)
5082{
5083 int rc;
5084
5085 /* Preparation for unregistering fcf */
5086 rc = lpfc_unregister_fcf_prep(phba);
5087 if (rc) {
5088 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
5089 "2749 Failed to prepare for unregistering "
5090 "HBA's FCF record: rc=%d\n", rc);
5091 return;
5092 }
5093
5094 /* Now, unregister FCF record and reset HBA FCF state */
5095 rc = lpfc_sli4_unregister_fcf(phba);
5096 if (rc)
5097 return;
5098 /* Set proper HBA FCF states after successful unregister FCF */
5099 spin_lock_irq(&phba->hbalock);
5100 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
5101 spin_unlock_irq(&phba->hbalock);
5102}
5103
5104/**
5105 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
5106 * @phba: Pointer to hba context object.
5107 *
 5108 * This function checks if there are any connected remote ports for the FCF
 5109 * and, if all the devices are disconnected, unregisters the FCFI.
 5110 * It also tries to use another FCF for discovery.
5111 */
5112void
5113lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
5114{
5115 /*
5116 * If HBA is not running in FIP mode or if HBA does not support
5117 * FCoE or if FCF is not registered, do nothing.
5118 */
5119 spin_lock_irq(&phba->hbalock);
5120 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
5121 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
5122 !(phba->hba_flag & HBA_FIP_SUPPORT)) {
5123 spin_unlock_irq(&phba->hbalock);
5124 return;
5125 }
5126 spin_unlock_irq(&phba->hbalock);
5127
5128 if (lpfc_fcf_inuse(phba))
5129 return;
5130
5131 lpfc_unregister_fcf_rescan(phba);
4493} 5132}
4494 5133
4495/** 5134/**
@@ -4512,8 +5151,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4512 5151
4513 /* Free the current connect table */ 5152 /* Free the current connect table */
4514 list_for_each_entry_safe(conn_entry, next_conn_entry, 5153 list_for_each_entry_safe(conn_entry, next_conn_entry,
4515 &phba->fcf_conn_rec_list, list) 5154 &phba->fcf_conn_rec_list, list) {
5155 list_del_init(&conn_entry->list);
4516 kfree(conn_entry); 5156 kfree(conn_entry);
5157 }
4517 5158
4518 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; 5159 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
4519 record_count = conn_hdr->length * sizeof(uint32_t)/ 5160 record_count = conn_hdr->length * sizeof(uint32_t)/
@@ -4569,14 +5210,6 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
4569 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) 5210 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4570 return; 5211 return;
4571 5212
4572 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4573 FIPP_MODE_ON)
4574 phba->cfg_enable_fip = 1;
4575
4576 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4577 FIPP_MODE_OFF)
4578 phba->cfg_enable_fip = 0;
4579
4580 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { 5213 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4581 phba->valid_vlan = 1; 5214 phba->valid_vlan = 1;
4582 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 5215 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index ccb26724dc53..89ff7c09e298 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -1124,21 +1124,6 @@ typedef struct {
1124/* Number of 4-byte words in an IOCB. */ 1124/* Number of 4-byte words in an IOCB. */
1125#define IOCB_WORD_SZ 8 1125#define IOCB_WORD_SZ 8
1126 1126
1127/* defines for type field in fc header */
1128#define FC_ELS_DATA 0x1
1129#define FC_LLC_SNAP 0x5
1130#define FC_FCP_DATA 0x8
1131#define FC_COMMON_TRANSPORT_ULP 0x20
1132
1133/* defines for rctl field in fc header */
1134#define FC_DEV_DATA 0x0
1135#define FC_UNSOL_CTL 0x2
1136#define FC_SOL_CTL 0x3
1137#define FC_UNSOL_DATA 0x4
1138#define FC_FCP_CMND 0x6
1139#define FC_ELS_REQ 0x22
1140#define FC_ELS_RSP 0x23
1141
1142/* network headers for Dfctl field */ 1127/* network headers for Dfctl field */
1143#define FC_NET_HDR 0x20 1128#define FC_NET_HDR 0x20
1144 1129
@@ -1183,6 +1168,8 @@ typedef struct {
1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1168#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 1169#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704 1170#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1171#define PCI_DEVICE_ID_TOMCAT 0x0714
1172#define PCI_DEVICE_ID_FALCON 0xf180
1186 1173
1187#define JEDEC_ID_ADDRESS 0x0080001c 1174#define JEDEC_ID_ADDRESS 0x0080001c
1188#define FIREFLY_JEDEC_ID 0x1ACC 1175#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1359,6 +1346,9 @@ typedef struct { /* FireFly BIU registers */
1359#define MBX_HEARTBEAT 0x31 1346#define MBX_HEARTBEAT 0x31
1360#define MBX_WRITE_VPARMS 0x32 1347#define MBX_WRITE_VPARMS 0x32
1361#define MBX_ASYNCEVT_ENABLE 0x33 1348#define MBX_ASYNCEVT_ENABLE 0x33
1349#define MBX_READ_EVENT_LOG_STATUS 0x37
1350#define MBX_READ_EVENT_LOG 0x38
1351#define MBX_WRITE_EVENT_LOG 0x39
1362 1352
1363#define MBX_PORT_CAPABILITIES 0x3B 1353#define MBX_PORT_CAPABILITIES 0x3B
1364#define MBX_PORT_IOV_CONTROL 0x3C 1354#define MBX_PORT_IOV_CONTROL 0x3C
@@ -1444,6 +1434,7 @@ typedef struct { /* FireFly BIU registers */
1444#define CMD_ABORT_MXRI64_CN 0x8C 1434#define CMD_ABORT_MXRI64_CN 0x8C
1445#define CMD_RCV_ELS_REQ64_CX 0x8D 1435#define CMD_RCV_ELS_REQ64_CX 0x8D
1446#define CMD_XMIT_ELS_RSP64_CX 0x95 1436#define CMD_XMIT_ELS_RSP64_CX 0x95
1437#define CMD_XMIT_BLS_RSP64_CX 0x97
1447#define CMD_FCP_IWRITE64_CR 0x98 1438#define CMD_FCP_IWRITE64_CR 0x98
1448#define CMD_FCP_IWRITE64_CX 0x99 1439#define CMD_FCP_IWRITE64_CX 0x99
1449#define CMD_FCP_IREAD64_CR 0x9A 1440#define CMD_FCP_IREAD64_CR 0x9A
@@ -1477,17 +1468,13 @@ typedef struct { /* FireFly BIU registers */
1477#define CMD_IOCB_LOGENTRY_CN 0x94 1468#define CMD_IOCB_LOGENTRY_CN 0x94
1478#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1469#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1479 1470
1480/* Unhandled Data Security SLI Commands */ 1471/* Data Security SLI Commands */
1481#define DSSCMD_IWRITE64_CR 0xD8 1472#define DSSCMD_IWRITE64_CR 0xF8
1482#define DSSCMD_IWRITE64_CX 0xD9 1473#define DSSCMD_IWRITE64_CX 0xF9
1483#define DSSCMD_IREAD64_CR 0xDA 1474#define DSSCMD_IREAD64_CR 0xFA
1484#define DSSCMD_IREAD64_CX 0xDB 1475#define DSSCMD_IREAD64_CX 0xFB
1485#define DSSCMD_INVALIDATE_DEK 0xDC 1476
1486#define DSSCMD_SET_KEK 0xDD 1477#define CMD_MAX_IOCB_CMD 0xFB
1487#define DSSCMD_GET_KEK_ID 0xDE
1488#define DSSCMD_GEN_XFER 0xDF
1489
1490#define CMD_MAX_IOCB_CMD 0xE6
1491#define CMD_IOCB_MASK 0xff 1478#define CMD_IOCB_MASK 0xff
1492 1479
1493#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG 1480#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG
@@ -2306,8 +2293,7 @@ typedef struct {
2306 uint32_t rsvd1; 2293 uint32_t rsvd1;
2307 uint32_t rsvd2:8; 2294 uint32_t rsvd2:8;
2308 uint32_t sid:24; 2295 uint32_t sid:24;
2309 uint32_t rsvd3; 2296 uint32_t wwn[2];
2310 uint32_t rsvd4;
2311 uint32_t rsvd5; 2297 uint32_t rsvd5;
2312 uint16_t vfi; 2298 uint16_t vfi;
2313 uint16_t vpi; 2299 uint16_t vpi;
@@ -2315,8 +2301,7 @@ typedef struct {
2315 uint32_t rsvd1; 2301 uint32_t rsvd1;
2316 uint32_t sid:24; 2302 uint32_t sid:24;
2317 uint32_t rsvd2:8; 2303 uint32_t rsvd2:8;
2318 uint32_t rsvd3; 2304 uint32_t wwn[2];
2319 uint32_t rsvd4;
2320 uint32_t rsvd5; 2305 uint32_t rsvd5;
2321 uint16_t vpi; 2306 uint16_t vpi;
2322 uint16_t vfi; 2307 uint16_t vfi;
@@ -2326,7 +2311,13 @@ typedef struct {
2326/* Structure for MB Command UNREG_VPI (0x97) */ 2311/* Structure for MB Command UNREG_VPI (0x97) */
2327typedef struct { 2312typedef struct {
2328 uint32_t rsvd1; 2313 uint32_t rsvd1;
2329 uint32_t rsvd2; 2314#ifdef __BIG_ENDIAN_BITFIELD
2315 uint16_t rsvd2;
2316 uint16_t sli4_vpi;
2317#else /* __LITTLE_ENDIAN */
2318 uint16_t sli4_vpi;
2319 uint16_t rsvd2;
2320#endif
2330 uint32_t rsvd3; 2321 uint32_t rsvd3;
2331 uint32_t rsvd4; 2322 uint32_t rsvd4;
2332 uint32_t rsvd5; 2323 uint32_t rsvd5;
@@ -3547,7 +3538,7 @@ typedef struct _IOCB { /* IOCB structure */
3547 ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ 3538 ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
3548 QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ 3539 QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
3549 struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ 3540 struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */
3550 3541 struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */
3551 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ 3542 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
3552 } un; 3543 } un;
3553 union { 3544 union {
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 3689eee04535..820015fbc4d6 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -52,35 +52,37 @@ struct dma_address {
52 uint32_t addr_hi; 52 uint32_t addr_hi;
53}; 53};
54 54
55#define LPFC_SLIREV_CONF_WORD 0x58
56struct lpfc_sli_intf { 55struct lpfc_sli_intf {
57 uint32_t word0; 56 uint32_t word0;
58#define lpfc_sli_intf_iftype_MASK 0x00000007 57#define lpfc_sli_intf_valid_SHIFT 29
59#define lpfc_sli_intf_iftype_SHIFT 0 58#define lpfc_sli_intf_valid_MASK 0x00000007
60#define lpfc_sli_intf_iftype_WORD word0 59#define lpfc_sli_intf_valid_WORD word0
61#define lpfc_sli_intf_rev_MASK 0x0000000f
62#define lpfc_sli_intf_rev_SHIFT 4
63#define lpfc_sli_intf_rev_WORD word0
64#define LPFC_SLIREV_CONF_SLI4 4
65#define lpfc_sli_intf_family_MASK 0x000000ff
66#define lpfc_sli_intf_family_SHIFT 8
67#define lpfc_sli_intf_family_WORD word0
68#define lpfc_sli_intf_feat1_MASK 0x000000ff
69#define lpfc_sli_intf_feat1_SHIFT 16
70#define lpfc_sli_intf_feat1_WORD word0
71#define lpfc_sli_intf_feat2_MASK 0x0000001f
72#define lpfc_sli_intf_feat2_SHIFT 24
73#define lpfc_sli_intf_feat2_WORD word0
74#define lpfc_sli_intf_valid_MASK 0x00000007
75#define lpfc_sli_intf_valid_SHIFT 29
76#define lpfc_sli_intf_valid_WORD word0
77#define LPFC_SLI_INTF_VALID 6 60#define LPFC_SLI_INTF_VALID 6
61#define lpfc_sli_intf_featurelevel2_SHIFT 24
62#define lpfc_sli_intf_featurelevel2_MASK 0x0000001F
63#define lpfc_sli_intf_featurelevel2_WORD word0
64#define lpfc_sli_intf_featurelevel1_SHIFT 16
65#define lpfc_sli_intf_featurelevel1_MASK 0x000000FF
66#define lpfc_sli_intf_featurelevel1_WORD word0
67#define LPFC_SLI_INTF_FEATURELEVEL1_1 1
68#define LPFC_SLI_INTF_FEATURELEVEL1_2 2
69#define lpfc_sli_intf_sli_family_SHIFT 8
70#define lpfc_sli_intf_sli_family_MASK 0x000000FF
71#define lpfc_sli_intf_sli_family_WORD word0
72#define LPFC_SLI_INTF_FAMILY_BE2 0
73#define LPFC_SLI_INTF_FAMILY_BE3 1
74#define lpfc_sli_intf_slirev_SHIFT 4
75#define lpfc_sli_intf_slirev_MASK 0x0000000F
76#define lpfc_sli_intf_slirev_WORD word0
77#define LPFC_SLI_INTF_REV_SLI3 3
78#define LPFC_SLI_INTF_REV_SLI4 4
79#define lpfc_sli_intf_if_type_SHIFT 0
80#define lpfc_sli_intf_if_type_MASK 0x00000007
81#define lpfc_sli_intf_if_type_WORD word0
82#define LPFC_SLI_INTF_IF_TYPE_0 0
83#define LPFC_SLI_INTF_IF_TYPE_1 1
78}; 84};
79 85
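The reorganized accessors above are consumed through the driver's bf_get() macro. A minimal sketch of validating the interface at probe time, assuming word0 is read from the LPFC_SLI_INTF offset through the sli4_hba.conf_regs_memmap_p BAR0 mapping (the mapping name is an assumption from lpfc_sli4.h):

	struct lpfc_sli_intf intf;

	intf.word0 = readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF);
	if (bf_get(lpfc_sli_intf_valid, &intf) != LPFC_SLI_INTF_VALID ||
	    bf_get(lpfc_sli_intf_slirev, &intf) != LPFC_SLI_INTF_REV_SLI4)
		return -ENODEV;	/* not a usable SLI-4 interface */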
80#define LPFC_SLI4_BAR0 1
81#define LPFC_SLI4_BAR1 2
82#define LPFC_SLI4_BAR2 4
83
84#define LPFC_SLI4_MBX_EMBED true 86#define LPFC_SLI4_MBX_EMBED true
85#define LPFC_SLI4_MBX_NEMBED false 87#define LPFC_SLI4_MBX_NEMBED false
86 88
@@ -161,6 +163,9 @@ struct lpfc_sli_intf {
161#define LPFC_FP_DEF_IMAX 10000 163#define LPFC_FP_DEF_IMAX 10000
162#define LPFC_SP_DEF_IMAX 10000 164#define LPFC_SP_DEF_IMAX 10000
163 165
166/* PORT_CAPABILITIES constants. */
167#define LPFC_MAX_SUPPORTED_PAGES 8
168
164struct ulp_bde64 { 169struct ulp_bde64 {
165 union ULP_BDE_TUS { 170 union ULP_BDE_TUS {
166 uint32_t w; 171 uint32_t w;
@@ -194,6 +199,26 @@ struct lpfc_sli4_flags {
194#define lpfc_fip_flag_WORD word0 199#define lpfc_fip_flag_WORD word0
195}; 200};
196 201
202struct sli4_bls_acc {
203 uint32_t word0_rsvd; /* Word0 must be reserved */
204 uint32_t word1;
205#define lpfc_abts_orig_SHIFT 0
206#define lpfc_abts_orig_MASK 0x00000001
207#define lpfc_abts_orig_WORD word1
208#define LPFC_ABTS_UNSOL_RSP 1
209#define LPFC_ABTS_UNSOL_INT 0
210 uint32_t word2;
211#define lpfc_abts_rxid_SHIFT 0
212#define lpfc_abts_rxid_MASK 0x0000FFFF
213#define lpfc_abts_rxid_WORD word2
214#define lpfc_abts_oxid_SHIFT 16
215#define lpfc_abts_oxid_MASK 0x0000FFFF
216#define lpfc_abts_oxid_WORD word2
217 uint32_t word3;
218 uint32_t word4;
219 uint32_t word5_rsvd; /* Word5 must be reserved */
220};
221
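A sketch of filling the new BA_ACC parameter block for an unsolicited ABTS response, using the un.bls_acc IOCB union member added in lpfc_hw.h above; oxid and rxid are hypothetical locals recovered from the aborted exchange's frame header:

	struct sli4_bls_acc *bls_acc = &cmdiocb->iocb.un.bls_acc;

	bf_set(lpfc_abts_oxid, bls_acc, oxid);
	bf_set(lpfc_abts_rxid, bls_acc, rxid);
	bf_set(lpfc_abts_orig, bls_acc, LPFC_ABTS_UNSOL_RSP);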
197/* event queue entry structure */ 222/* event queue entry structure */
198struct lpfc_eqe { 223struct lpfc_eqe {
199 uint32_t word0; 224 uint32_t word0;
@@ -425,7 +450,7 @@ struct lpfc_wqe_generic{
425#define lpfc_wqe_gen_status_MASK 0x0000000F 450#define lpfc_wqe_gen_status_MASK 0x0000000F
426#define lpfc_wqe_gen_status_WORD word7 451#define lpfc_wqe_gen_status_WORD word7
427#define lpfc_wqe_gen_ct_SHIFT 2 452#define lpfc_wqe_gen_ct_SHIFT 2
428#define lpfc_wqe_gen_ct_MASK 0x00000007 453#define lpfc_wqe_gen_ct_MASK 0x00000003
429#define lpfc_wqe_gen_ct_WORD word7 454#define lpfc_wqe_gen_ct_WORD word7
430 uint32_t abort_tag; 455 uint32_t abort_tag;
431 uint32_t word9; 456 uint32_t word9;
@@ -453,6 +478,13 @@ struct lpfc_wqe_generic{
453#define lpfc_wqe_gen_wqec_SHIFT 7 478#define lpfc_wqe_gen_wqec_SHIFT 7
454#define lpfc_wqe_gen_wqec_MASK 0x00000001 479#define lpfc_wqe_gen_wqec_MASK 0x00000001
455#define lpfc_wqe_gen_wqec_WORD word11 480#define lpfc_wqe_gen_wqec_WORD word11
481#define ELS_ID_FLOGI 3
482#define ELS_ID_FDISC 2
483#define ELS_ID_LOGO 1
484#define ELS_ID_DEFAULT 0
485#define lpfc_wqe_gen_els_id_SHIFT 4
486#define lpfc_wqe_gen_els_id_MASK 0x00000003
487#define lpfc_wqe_gen_els_id_WORD word11
456#define lpfc_wqe_gen_cmd_type_SHIFT 0 488#define lpfc_wqe_gen_cmd_type_SHIFT 0
457#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F 489#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
458#define lpfc_wqe_gen_cmd_type_WORD word11 490#define lpfc_wqe_gen_cmd_type_WORD word11
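The new els_id field lets the WQE layer distinguish FLOGI, FDISC, and LOGO when building ELS request WQEs. A hedged sketch of selecting the value from the ELS command code (elscmd and the dispatch shown are illustrative, not the driver's exact logic):

	uint8_t els_id = ELS_ID_DEFAULT;

	if (elscmd == ELS_CMD_FLOGI)
		els_id = ELS_ID_FLOGI;
	else if (elscmd == ELS_CMD_FDISC)
		els_id = ELS_ID_FDISC;
	else if (elscmd == ELS_CMD_LOGO)
		els_id = ELS_ID_LOGO;
	bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);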
@@ -487,9 +519,9 @@ struct lpfc_register {
487 519
488#define LPFC_UERR_STATUS_HI 0x00A4 520#define LPFC_UERR_STATUS_HI 0x00A4
489#define LPFC_UERR_STATUS_LO 0x00A0 521#define LPFC_UERR_STATUS_LO 0x00A0
490#define LPFC_ONLINE0 0x00B0 522#define LPFC_UE_MASK_HI 0x00AC
491#define LPFC_ONLINE1 0x00B4 523#define LPFC_UE_MASK_LO 0x00A8
492#define LPFC_SCRATCHPAD 0x0058 524#define LPFC_SLI_INTF 0x0058
493 525
494/* BAR0 Registers */ 526/* BAR0 Registers */
495#define LPFC_HST_STATE 0x00AC 527#define LPFC_HST_STATE 0x00AC
@@ -549,19 +581,6 @@ struct lpfc_register {
549#define LPFC_POST_STAGE_ARMFW_READY 0xC000 581#define LPFC_POST_STAGE_ARMFW_READY 0xC000
550#define LPFC_POST_STAGE_ARMFW_UE 0xF000 582#define LPFC_POST_STAGE_ARMFW_UE 0xF000
551 583
552#define lpfc_scratchpad_slirev_SHIFT 4
553#define lpfc_scratchpad_slirev_MASK 0xF
554#define lpfc_scratchpad_slirev_WORD word0
555#define lpfc_scratchpad_chiptype_SHIFT 8
556#define lpfc_scratchpad_chiptype_MASK 0xFF
557#define lpfc_scratchpad_chiptype_WORD word0
558#define lpfc_scratchpad_featurelevel1_SHIFT 16
559#define lpfc_scratchpad_featurelevel1_MASK 0xFF
560#define lpfc_scratchpad_featurelevel1_WORD word0
561#define lpfc_scratchpad_featurelevel2_SHIFT 24
562#define lpfc_scratchpad_featurelevel2_MASK 0xFF
563#define lpfc_scratchpad_featurelevel2_WORD word0
564
565/* BAR1 Registers */ 584/* BAR1 Registers */
566#define LPFC_IMR_MASK_ALL 0xFFFFFFFF 585#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
567#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF 586#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
@@ -760,6 +779,7 @@ struct mbox_header {
760#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 779#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
761#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 780#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
762#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 781#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
782#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
763#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D 783#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
764 784
765/* FCoE Opcodes */ 785/* FCoE Opcodes */
@@ -773,6 +793,7 @@ struct mbox_header {
773#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 793#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
774#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A 794#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
775#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B 795#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
796#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
776 797
777/* Mailbox command structures */ 798/* Mailbox command structures */
778struct eq_context { 799struct eq_context {
@@ -985,7 +1006,7 @@ struct lpfc_mbx_wq_destroy {
985}; 1006};
986 1007
987#define LPFC_HDR_BUF_SIZE 128 1008#define LPFC_HDR_BUF_SIZE 128
988#define LPFC_DATA_BUF_SIZE 4096 1009#define LPFC_DATA_BUF_SIZE 2048
989struct rq_context { 1010struct rq_context {
990 uint32_t word0; 1011 uint32_t word0;
991#define lpfc_rq_context_rq_size_SHIFT 16 1012#define lpfc_rq_context_rq_size_SHIFT 16
@@ -1121,10 +1142,7 @@ struct sli4_sge { /* SLI-4 */
1121 this flag !! */ 1142 this flag !! */
1122#define lpfc_sli4_sge_last_MASK 0x00000001 1143#define lpfc_sli4_sge_last_MASK 0x00000001
1123#define lpfc_sli4_sge_last_WORD word2 1144#define lpfc_sli4_sge_last_WORD word2
1124 uint32_t word3; 1145 uint32_t sge_len;
1125#define lpfc_sli4_sge_len_SHIFT 0
1126#define lpfc_sli4_sge_len_MASK 0x0001FFFF
1127#define lpfc_sli4_sge_len_WORD word3
1128}; 1146};
1129 1147
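With word3 collapsed into a plain sge_len, the SGE length is now byte-swapped and stored directly rather than packed through bf_set(). A sketch of posting one data SGE, assuming physaddr and dma_len come from the mapped scatterlist and putPaddrHigh()/putPaddrLow() from lpfc_hw.h:

	sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
	bf_set(lpfc_sli4_sge_last, sgl, 1);	/* final SGE in the list */
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(dma_len);	/* was bf_set(lpfc_sli4_sge_len, ...) */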
1130struct fcf_record { 1148struct fcf_record {
@@ -1273,6 +1291,64 @@ struct lpfc_mbx_del_fcf_tbl_entry {
1273#define lpfc_mbx_del_fcf_tbl_index_WORD word10 1291#define lpfc_mbx_del_fcf_tbl_index_WORD word10
1274}; 1292};
1275 1293
1294struct lpfc_mbx_redisc_fcf_tbl {
1295 struct mbox_header header;
1296 uint32_t word10;
1297#define lpfc_mbx_redisc_fcf_count_SHIFT 0
1298#define lpfc_mbx_redisc_fcf_count_MASK 0x0000FFFF
1299#define lpfc_mbx_redisc_fcf_count_WORD word10
1300 uint32_t resvd;
1301 uint32_t word12;
1302#define lpfc_mbx_redisc_fcf_index_SHIFT 0
1303#define lpfc_mbx_redisc_fcf_index_MASK 0x0000FFFF
1304#define lpfc_mbx_redisc_fcf_index_WORD word12
1305};
1306
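A sketch of driving the new REDISCOVER_FCF_TABLE mailbox through the existing lpfc_sli4_config() helper; a count of 0 is assumed here to mean "rescan all active FCF indices":

	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t length;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	length = sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		 sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);	/* 0: rescan all */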
1307struct lpfc_mbx_query_fw_cfg {
1308 struct mbox_header header;
1309 uint32_t config_number;
1310 uint32_t asic_rev;
1311 uint32_t phys_port;
1312 uint32_t function_mode;
1313/* firmware Function Mode */
1314#define lpfc_function_mode_toe_SHIFT 0
1315#define lpfc_function_mode_toe_MASK 0x00000001
1316#define lpfc_function_mode_toe_WORD function_mode
1317#define lpfc_function_mode_nic_SHIFT 1
1318#define lpfc_function_mode_nic_MASK 0x00000001
1319#define lpfc_function_mode_nic_WORD function_mode
1320#define lpfc_function_mode_rdma_SHIFT 2
1321#define lpfc_function_mode_rdma_MASK 0x00000001
1322#define lpfc_function_mode_rdma_WORD function_mode
1323#define lpfc_function_mode_vm_SHIFT 3
1324#define lpfc_function_mode_vm_MASK 0x00000001
1325#define lpfc_function_mode_vm_WORD function_mode
1326#define lpfc_function_mode_iscsi_i_SHIFT 4
1327#define lpfc_function_mode_iscsi_i_MASK 0x00000001
1328#define lpfc_function_mode_iscsi_i_WORD function_mode
1329#define lpfc_function_mode_iscsi_t_SHIFT 5
1330#define lpfc_function_mode_iscsi_t_MASK 0x00000001
1331#define lpfc_function_mode_iscsi_t_WORD function_mode
1332#define lpfc_function_mode_fcoe_i_SHIFT 6
1333#define lpfc_function_mode_fcoe_i_MASK 0x00000001
1334#define lpfc_function_mode_fcoe_i_WORD function_mode
1335#define lpfc_function_mode_fcoe_t_SHIFT 7
1336#define lpfc_function_mode_fcoe_t_MASK 0x00000001
1337#define lpfc_function_mode_fcoe_t_WORD function_mode
1338#define lpfc_function_mode_dal_SHIFT 8
1339#define lpfc_function_mode_dal_MASK 0x00000001
1340#define lpfc_function_mode_dal_WORD function_mode
1341#define lpfc_function_mode_lro_SHIFT 9
1342#define lpfc_function_mode_lro_MASK 0x00000001
 1343#define lpfc_function_mode_lro_WORD function_mode
1344#define lpfc_function_mode_flex10_SHIFT 10
1345#define lpfc_function_mode_flex10_MASK 0x00000001
1346#define lpfc_function_mode_flex10_WORD function_mode
1347#define lpfc_function_mode_ncsi_SHIFT 11
1348#define lpfc_function_mode_ncsi_MASK 0x00000001
1349#define lpfc_function_mode_ncsi_WORD function_mode
1350};
1351
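Each function-mode flag uses function_mode itself as its _WORD, so the generic bf_get() accessor applies unchanged. A sketch of testing the FCoE initiator bit after QUERY_FW_CFG completes; fcoe_initiator is a hypothetical local:

	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
	bool fcoe_initiator;

	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
	fcoe_initiator = bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg);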
1276/* Status field for embedded SLI_CONFIG mailbox command */ 1352/* Status field for embedded SLI_CONFIG mailbox command */
1277#define STATUS_SUCCESS 0x0 1353#define STATUS_SUCCESS 0x0
1278#define STATUS_FAILED 0x1 1354#define STATUS_FAILED 0x1
@@ -1298,6 +1374,7 @@ struct lpfc_mbx_del_fcf_tbl_entry {
1298#define STATUS_ERROR_ACITMAIN 0x2a 1374#define STATUS_ERROR_ACITMAIN 0x2a
1299#define STATUS_REBOOT_REQUIRED 0x2c 1375#define STATUS_REBOOT_REQUIRED 0x2c
1300#define STATUS_FCF_IN_USE 0x3a 1376#define STATUS_FCF_IN_USE 0x3a
1377#define STATUS_FCF_TABLE_EMPTY 0x43
1301 1378
1302struct lpfc_mbx_sli4_config { 1379struct lpfc_mbx_sli4_config {
1303 struct mbox_header header; 1380 struct mbox_header header;
@@ -1349,8 +1426,7 @@ struct lpfc_mbx_reg_vfi {
1349#define lpfc_reg_vfi_fcfi_SHIFT 0 1426#define lpfc_reg_vfi_fcfi_SHIFT 0
1350#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF 1427#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
1351#define lpfc_reg_vfi_fcfi_WORD word2 1428#define lpfc_reg_vfi_fcfi_WORD word2
1352 uint32_t word3_rsvd; 1429 uint32_t wwn[2];
1353 uint32_t word4_rsvd;
1354 struct ulp_bde64 bde; 1430 struct ulp_bde64 bde;
1355 uint32_t word8_rsvd; 1431 uint32_t word8_rsvd;
1356 uint32_t word9_rsvd; 1432 uint32_t word9_rsvd;
@@ -1555,6 +1631,11 @@ struct lpfc_mbx_read_rev {
1555#define lpfc_mbx_rd_rev_fcoe_SHIFT 20 1631#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
1556#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 1632#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
1557#define lpfc_mbx_rd_rev_fcoe_WORD word1 1633#define lpfc_mbx_rd_rev_fcoe_WORD word1
1634#define lpfc_mbx_rd_rev_cee_ver_SHIFT 21
1635#define lpfc_mbx_rd_rev_cee_ver_MASK 0x00000003
1636#define lpfc_mbx_rd_rev_cee_ver_WORD word1
1637#define LPFC_PREDCBX_CEE_MODE 0
1638#define LPFC_DCBX_CEE_MODE 1
1558#define lpfc_mbx_rd_rev_vpd_SHIFT 29 1639#define lpfc_mbx_rd_rev_vpd_SHIFT 29
1559#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 1640#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
1560#define lpfc_mbx_rd_rev_vpd_WORD word1 1641#define lpfc_mbx_rd_rev_vpd_WORD word1
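The CEE version bits pair with the HBA_FIP_SUPPORT flag used in the lpfc_hbadisc.c changes above: only a DCBX-mode port needs the FIP state machine. A hedged sketch of how the READ_REV result might be evaluated:

	struct lpfc_mqe *mqe = &mboxq->u.mqe;

	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
		phba->hba_flag |= HBA_FCOE_SUPPORT;
	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
	    LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;	/* T11 FIP mode */
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;	/* pre-FIP CEE mode */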
@@ -1756,6 +1837,177 @@ struct lpfc_mbx_request_features {
1756#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 1837#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
1757}; 1838};
1758 1839
1840struct lpfc_mbx_supp_pages {
1841 uint32_t word1;
1842#define qs_SHIFT 0
1843#define qs_MASK 0x00000001
1844#define qs_WORD word1
1845#define wr_SHIFT 1
1846#define wr_MASK 0x00000001
1847#define wr_WORD word1
1848#define pf_SHIFT 8
1849#define pf_MASK 0x000000ff
1850#define pf_WORD word1
1851#define cpn_SHIFT 16
1852#define cpn_MASK 0x000000ff
1853#define cpn_WORD word1
1854 uint32_t word2;
1855#define list_offset_SHIFT 0
1856#define list_offset_MASK 0x000000ff
1857#define list_offset_WORD word2
1858#define next_offset_SHIFT 8
1859#define next_offset_MASK 0x000000ff
1860#define next_offset_WORD word2
1861#define elem_cnt_SHIFT 16
1862#define elem_cnt_MASK 0x000000ff
1863#define elem_cnt_WORD word2
1864 uint32_t word3;
1865#define pn_0_SHIFT 24
1866#define pn_0_MASK 0x000000ff
1867#define pn_0_WORD word3
1868#define pn_1_SHIFT 16
1869#define pn_1_MASK 0x000000ff
1870#define pn_1_WORD word3
1871#define pn_2_SHIFT 8
1872#define pn_2_MASK 0x000000ff
1873#define pn_2_WORD word3
1874#define pn_3_SHIFT 0
1875#define pn_3_MASK 0x000000ff
1876#define pn_3_WORD word3
1877 uint32_t word4;
1878#define pn_4_SHIFT 24
1879#define pn_4_MASK 0x000000ff
1880#define pn_4_WORD word4
1881#define pn_5_SHIFT 16
1882#define pn_5_MASK 0x000000ff
1883#define pn_5_WORD word4
1884#define pn_6_SHIFT 8
1885#define pn_6_MASK 0x000000ff
1886#define pn_6_WORD word4
1887#define pn_7_SHIFT 0
1888#define pn_7_MASK 0x000000ff
1889#define pn_7_WORD word4
1890 uint32_t rsvd[27];
1891#define LPFC_SUPP_PAGES 0
1892#define LPFC_BLOCK_GUARD_PROFILES 1
1893#define LPFC_SLI4_PARAMETERS 2
1894};
1895
1896struct lpfc_mbx_sli4_params {
1897 uint32_t word1;
1898#define qs_SHIFT 0
1899#define qs_MASK 0x00000001
1900#define qs_WORD word1
1901#define wr_SHIFT 1
1902#define wr_MASK 0x00000001
1903#define wr_WORD word1
1904#define pf_SHIFT 8
1905#define pf_MASK 0x000000ff
1906#define pf_WORD word1
1907#define cpn_SHIFT 16
1908#define cpn_MASK 0x000000ff
1909#define cpn_WORD word1
1910 uint32_t word2;
1911#define if_type_SHIFT 0
1912#define if_type_MASK 0x00000007
1913#define if_type_WORD word2
1914#define sli_rev_SHIFT 4
1915#define sli_rev_MASK 0x0000000f
1916#define sli_rev_WORD word2
1917#define sli_family_SHIFT 8
1918#define sli_family_MASK 0x000000ff
1919#define sli_family_WORD word2
1920#define featurelevel_1_SHIFT 16
1921#define featurelevel_1_MASK 0x000000ff
1922#define featurelevel_1_WORD word2
1923#define featurelevel_2_SHIFT 24
1924#define featurelevel_2_MASK 0x0000001f
1925#define featurelevel_2_WORD word2
1926 uint32_t word3;
1927#define fcoe_SHIFT 0
1928#define fcoe_MASK 0x00000001
1929#define fcoe_WORD word3
1930#define fc_SHIFT 1
1931#define fc_MASK 0x00000001
1932#define fc_WORD word3
1933#define nic_SHIFT 2
1934#define nic_MASK 0x00000001
1935#define nic_WORD word3
1936#define iscsi_SHIFT 3
1937#define iscsi_MASK 0x00000001
1938#define iscsi_WORD word3
1939#define rdma_SHIFT 4
1940#define rdma_MASK 0x00000001
1941#define rdma_WORD word3
1942 uint32_t sge_supp_len;
1943 uint32_t word5;
1944#define if_page_sz_SHIFT 0
1945#define if_page_sz_MASK 0x0000ffff
1946#define if_page_sz_WORD word5
1947#define loopbk_scope_SHIFT 24
1948#define loopbk_scope_MASK 0x0000000f
1949#define loopbk_scope_WORD word5
1950#define rq_db_window_SHIFT 28
1951#define rq_db_window_MASK 0x0000000f
1952#define rq_db_window_WORD word5
1953 uint32_t word6;
1954#define eq_pages_SHIFT 0
1955#define eq_pages_MASK 0x0000000f
1956#define eq_pages_WORD word6
1957#define eqe_size_SHIFT 8
1958#define eqe_size_MASK 0x000000ff
1959#define eqe_size_WORD word6
1960 uint32_t word7;
1961#define cq_pages_SHIFT 0
1962#define cq_pages_MASK 0x0000000f
1963#define cq_pages_WORD word7
1964#define cqe_size_SHIFT 8
1965#define cqe_size_MASK 0x000000ff
1966#define cqe_size_WORD word7
1967 uint32_t word8;
1968#define mq_pages_SHIFT 0
1969#define mq_pages_MASK 0x0000000f
1970#define mq_pages_WORD word8
1971#define mqe_size_SHIFT 8
1972#define mqe_size_MASK 0x000000ff
1973#define mqe_size_WORD word8
1974#define mq_elem_cnt_SHIFT 16
1975#define mq_elem_cnt_MASK 0x000000ff
1976#define mq_elem_cnt_WORD word8
1977 uint32_t word9;
1978#define wq_pages_SHIFT 0
1979#define wq_pages_MASK 0x0000ffff
1980#define wq_pages_WORD word9
1981#define wqe_size_SHIFT 8
1982#define wqe_size_MASK 0x000000ff
1983#define wqe_size_WORD word9
1984 uint32_t word10;
1985#define rq_pages_SHIFT 0
1986#define rq_pages_MASK 0x0000ffff
1987#define rq_pages_WORD word10
1988#define rqe_size_SHIFT 8
1989#define rqe_size_MASK 0x000000ff
1990#define rqe_size_WORD word10
1991 uint32_t word11;
1992#define hdr_pages_SHIFT 0
1993#define hdr_pages_MASK 0x0000000f
1994#define hdr_pages_WORD word11
1995#define hdr_size_SHIFT 8
1996#define hdr_size_MASK 0x0000000f
1997#define hdr_size_WORD word11
1998#define hdr_pp_align_SHIFT 16
1999#define hdr_pp_align_MASK 0x0000ffff
2000#define hdr_pp_align_WORD word11
2001 uint32_t word12;
2002#define sgl_pages_SHIFT 0
2003#define sgl_pages_MASK 0x0000000f
2004#define sgl_pages_WORD word12
2005#define sgl_pp_align_SHIFT 16
2006#define sgl_pp_align_MASK 0x0000ffff
2007#define sgl_pp_align_WORD word12
2008 uint32_t rsvd_13_63[51];
2009};
2010
1759/* Mailbox Completion Queue Error Messages */ 2011/* Mailbox Completion Queue Error Messages */
1760#define MB_CQE_STATUS_SUCCESS 0x0 2012#define MB_CQE_STATUS_SUCCESS 0x0
1761#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 2013#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -1785,6 +2037,7 @@ struct lpfc_mqe {
1785 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; 2037 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
1786 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; 2038 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
1787 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; 2039 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
2040 struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
1788 struct lpfc_mbx_reg_fcfi reg_fcfi; 2041 struct lpfc_mbx_reg_fcfi reg_fcfi;
1789 struct lpfc_mbx_unreg_fcfi unreg_fcfi; 2042 struct lpfc_mbx_unreg_fcfi unreg_fcfi;
1790 struct lpfc_mbx_mq_create mq_create; 2043 struct lpfc_mbx_mq_create mq_create;
@@ -1804,6 +2057,9 @@ struct lpfc_mqe {
1804 struct lpfc_mbx_read_config rd_config; 2057 struct lpfc_mbx_read_config rd_config;
1805 struct lpfc_mbx_request_features req_ftrs; 2058 struct lpfc_mbx_request_features req_ftrs;
1806 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; 2059 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
2060 struct lpfc_mbx_query_fw_cfg query_fw_cfg;
2061 struct lpfc_mbx_supp_pages supp_pages;
2062 struct lpfc_mbx_sli4_params sli4_params;
1807 struct lpfc_mbx_nop nop; 2063 struct lpfc_mbx_nop nop;
1808 } un; 2064 } un;
1809}; 2065};
@@ -1880,12 +2136,15 @@ struct lpfc_acqe_link {
1880#define LPFC_ASYNC_LINK_FAULT_NONE 0x0 2136#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
1881#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 2137#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
1882#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 2138#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
2139#define lpfc_acqe_qos_link_speed_SHIFT 16
2140#define lpfc_acqe_qos_link_speed_MASK 0x0000FFFF
2141#define lpfc_acqe_qos_link_speed_WORD word1
1883 uint32_t event_tag; 2142 uint32_t event_tag;
1884 uint32_t trailer; 2143 uint32_t trailer;
1885}; 2144};
1886 2145
1887struct lpfc_acqe_fcoe { 2146struct lpfc_acqe_fcoe {
1888 uint32_t fcf_index; 2147 uint32_t index;
1889 uint32_t word1; 2148 uint32_t word1;
1890#define lpfc_acqe_fcoe_fcf_count_SHIFT 0 2149#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
1891#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF 2150#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
@@ -1896,6 +2155,8 @@ struct lpfc_acqe_fcoe {
1896#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 2155#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
1897#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 2156#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
1898#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 2157#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
2158#define LPFC_FCOE_EVENT_TYPE_CVL 0x4
2159#define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD 0x5
1899 uint32_t event_tag; 2160 uint32_t event_tag;
1900 uint32_t trailer; 2161 uint32_t trailer;
1901}; 2162};
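The two new async event codes extend the FCoE event dispatch. A sketch of how a handler might branch on them (the CVL and FCF_DEAD reactions are summarized, not reproduced from the driver):

	switch (bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe)) {
	case LPFC_FCOE_EVENT_TYPE_CVL:
		/* clear virtual link: re-issue FLOGI/FDISC for the vport */
		break;
	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
		/* in-use FCF went away: unregister and rescan the table */
		lpfc_unregister_fcf_rescan(phba);
		break;
	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
		/* FCF parameters modified: revalidate the current FCF */
		break;
	}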
@@ -1921,12 +2182,13 @@ struct lpfc_bmbx_create {
1921#define SGL_ALIGN_SZ 64 2182#define SGL_ALIGN_SZ 64
1922#define SGL_PAGE_SIZE 4096 2183#define SGL_PAGE_SIZE 4096
1923/* align SGL addr on a size boundary - adjust address up */ 2184/* align SGL addr on a size boundary - adjust address up */
1924#define NO_XRI ((uint16_t)-1) 2185#define NO_XRI ((uint16_t)-1)
2186
1925struct wqe_common { 2187struct wqe_common {
1926 uint32_t word6; 2188 uint32_t word6;
1927#define wqe_xri_SHIFT 0 2189#define wqe_xri_tag_SHIFT 0
1928#define wqe_xri_MASK 0x0000FFFF 2190#define wqe_xri_tag_MASK 0x0000FFFF
1929#define wqe_xri_WORD word6 2191#define wqe_xri_tag_WORD word6
1930#define wqe_ctxt_tag_SHIFT 16 2192#define wqe_ctxt_tag_SHIFT 16
1931#define wqe_ctxt_tag_MASK 0x0000FFFF 2193#define wqe_ctxt_tag_MASK 0x0000FFFF
1932#define wqe_ctxt_tag_WORD word6 2194#define wqe_ctxt_tag_WORD word6
@@ -1987,7 +2249,7 @@ struct wqe_common {
1987#define wqe_wqec_MASK 0x00000001 2249#define wqe_wqec_MASK 0x00000001
1988#define wqe_wqec_WORD word11 2250#define wqe_wqec_WORD word11
1989#define wqe_cqid_SHIFT 16 2251#define wqe_cqid_SHIFT 16
1990#define wqe_cqid_MASK 0x000003ff 2252#define wqe_cqid_MASK 0x0000ffff
1991#define wqe_cqid_WORD word11 2253#define wqe_cqid_WORD word11
1992}; 2254};
1993 2255
@@ -1996,6 +2258,9 @@ struct wqe_did {
1996#define wqe_els_did_SHIFT 0 2258#define wqe_els_did_SHIFT 0
1997#define wqe_els_did_MASK 0x00FFFFFF 2259#define wqe_els_did_MASK 0x00FFFFFF
1998#define wqe_els_did_WORD word5 2260#define wqe_els_did_WORD word5
2261#define wqe_xmit_bls_pt_SHIFT 28
2262#define wqe_xmit_bls_pt_MASK 0x00000003
2263#define wqe_xmit_bls_pt_WORD word5
1999#define wqe_xmit_bls_ar_SHIFT 30 2264#define wqe_xmit_bls_ar_SHIFT 30
2000#define wqe_xmit_bls_ar_MASK 0x00000001 2265#define wqe_xmit_bls_ar_MASK 0x00000001
2001#define wqe_xmit_bls_ar_WORD word5 2266#define wqe_xmit_bls_ar_WORD word5
@@ -2044,6 +2309,23 @@ struct xmit_els_rsp64_wqe {
2044 2309
2045struct xmit_bls_rsp64_wqe { 2310struct xmit_bls_rsp64_wqe {
2046 uint32_t payload0; 2311 uint32_t payload0;
2312/* Payload0 for BA_ACC */
2313#define xmit_bls_rsp64_acc_seq_id_SHIFT 16
2314#define xmit_bls_rsp64_acc_seq_id_MASK 0x000000ff
2315#define xmit_bls_rsp64_acc_seq_id_WORD payload0
2316#define xmit_bls_rsp64_acc_seq_id_vald_SHIFT 24
2317#define xmit_bls_rsp64_acc_seq_id_vald_MASK 0x000000ff
2318#define xmit_bls_rsp64_acc_seq_id_vald_WORD payload0
2319/* Payload0 for BA_RJT */
2320#define xmit_bls_rsp64_rjt_vspec_SHIFT 0
2321#define xmit_bls_rsp64_rjt_vspec_MASK 0x000000ff
2322#define xmit_bls_rsp64_rjt_vspec_WORD payload0
2323#define xmit_bls_rsp64_rjt_expc_SHIFT 8
2324#define xmit_bls_rsp64_rjt_expc_MASK 0x000000ff
2325#define xmit_bls_rsp64_rjt_expc_WORD payload0
2326#define xmit_bls_rsp64_rjt_rsnc_SHIFT 16
2327#define xmit_bls_rsp64_rjt_rsnc_MASK 0x000000ff
2328#define xmit_bls_rsp64_rjt_rsnc_WORD payload0
2047 uint32_t word1; 2329 uint32_t word1;
2048#define xmit_bls_rsp64_rxid_SHIFT 0 2330#define xmit_bls_rsp64_rxid_SHIFT 0
2049#define xmit_bls_rsp64_rxid_MASK 0x0000ffff 2331#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
@@ -2052,18 +2334,19 @@ struct xmit_bls_rsp64_wqe {
2052#define xmit_bls_rsp64_oxid_MASK 0x0000ffff 2334#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
2053#define xmit_bls_rsp64_oxid_WORD word1 2335#define xmit_bls_rsp64_oxid_WORD word1
2054 uint32_t word2; 2336 uint32_t word2;
2055#define xmit_bls_rsp64_seqcntlo_SHIFT 0 2337#define xmit_bls_rsp64_seqcnthi_SHIFT 0
2056#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2057#define xmit_bls_rsp64_seqcntlo_WORD word2
2058#define xmit_bls_rsp64_seqcnthi_SHIFT 16
2059#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff 2338#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
2060#define xmit_bls_rsp64_seqcnthi_WORD word2 2339#define xmit_bls_rsp64_seqcnthi_WORD word2
2340#define xmit_bls_rsp64_seqcntlo_SHIFT 16
2341#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2342#define xmit_bls_rsp64_seqcntlo_WORD word2
2061 uint32_t rsrvd3; 2343 uint32_t rsrvd3;
2062 uint32_t rsrvd4; 2344 uint32_t rsrvd4;
2063 struct wqe_did wqe_dest; 2345 struct wqe_did wqe_dest;
2064 struct wqe_common wqe_com; /* words 6-11 */ 2346 struct wqe_common wqe_com; /* words 6-11 */
2065 uint32_t rsvd_12_15[4]; 2347 uint32_t rsvd_12_15[4];
2066}; 2348};
2349
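A sketch of filling a BA_ACC XMIT_BLS_RSP64 WQE with the new payload0 layout; oxid, rxid, and xri_tag are hypothetical values recovered from the unsolicited ABTS, and the union lpfc_wqe member name xmit_bls_rsp is an assumption from lpfc_hw4.h:

	union lpfc_wqe wqe;

	memset(&wqe, 0, sizeof(union lpfc_wqe));
	bf_set(xmit_bls_rsp64_oxid, &wqe.xmit_bls_rsp, oxid);
	bf_set(xmit_bls_rsp64_rxid, &wqe.xmit_bls_rsp, rxid);
	bf_set(wqe_xri_tag, &wqe.xmit_bls_rsp.wqe_com, xri_tag);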
2067struct wqe_rctl_dfctl { 2350struct wqe_rctl_dfctl {
2068 uint32_t word5; 2351 uint32_t word5;
2069#define wqe_si_SHIFT 2 2352#define wqe_si_SHIFT 2
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 562d8cee874b..774663e8e1fe 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,6 +28,8 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/ctype.h> 30#include <linux/ctype.h>
31#include <linux/aer.h>
32#include <linux/slab.h>
31 33
32#include <scsi/scsi.h> 34#include <scsi/scsi.h>
33#include <scsi/scsi_device.h> 35#include <scsi/scsi_device.h>
@@ -349,7 +351,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
349 mb = &pmb->u.mb; 351 mb = &pmb->u.mb;
350 352
351 /* Get login parameters for NID. */ 353 /* Get login parameters for NID. */
352 lpfc_read_sparam(phba, pmb, 0); 354 rc = lpfc_read_sparam(phba, pmb, 0);
355 if (rc) {
356 mempool_free(pmb, phba->mbox_mem_pool);
357 return -ENOMEM;
358 }
359
353 pmb->vport = vport; 360 pmb->vport = vport;
354 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 361 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -358,7 +365,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
358 mb->mbxCommand, mb->mbxStatus); 365 mb->mbxCommand, mb->mbxStatus);
359 phba->link_state = LPFC_HBA_ERROR; 366 phba->link_state = LPFC_HBA_ERROR;
360 mp = (struct lpfc_dmabuf *) pmb->context1; 367 mp = (struct lpfc_dmabuf *) pmb->context1;
361 mempool_free( pmb, phba->mbox_mem_pool); 368 mempool_free(pmb, phba->mbox_mem_pool);
362 lpfc_mbuf_free(phba, mp->virt, mp->phys); 369 lpfc_mbuf_free(phba, mp->virt, mp->phys);
363 kfree(mp); 370 kfree(mp);
364 return -EIO; 371 return -EIO;
@@ -543,7 +550,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
543 mempool_free(pmb, phba->mbox_mem_pool); 550 mempool_free(pmb, phba->mbox_mem_pool);
544 return -EIO; 551 return -EIO;
545 } 552 }
546 } else { 553 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
547 lpfc_init_link(phba, pmb, phba->cfg_topology, 554 lpfc_init_link(phba, pmb, phba->cfg_topology,
548 phba->cfg_link_speed); 555 phba->cfg_link_speed);
549 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 556 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -570,6 +577,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
570 } 577 }
571 /* MBOX buffer will be freed in mbox compl */ 578 /* MBOX buffer will be freed in mbox compl */
572 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 579 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
580 if (!pmb) {
581 phba->link_state = LPFC_HBA_ERROR;
582 return -ENOMEM;
583 }
584
573 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 585 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
574 pmb->mbox_cmpl = lpfc_config_async_cmpl; 586 pmb->mbox_cmpl = lpfc_config_async_cmpl;
575 pmb->vport = phba->pport; 587 pmb->vport = phba->pport;
@@ -587,6 +599,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
587 599
588 /* Get Option rom version */ 600 /* Get Option rom version */
589 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 601 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
602 if (!pmb) {
603 phba->link_state = LPFC_HBA_ERROR;
604 return -ENOMEM;
605 }
606
590 lpfc_dump_wakeup_param(phba, pmb); 607 lpfc_dump_wakeup_param(phba, pmb);
591 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 608 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
592 pmb->vport = phba->pport; 609 pmb->vport = phba->pport;
@@ -602,6 +619,102 @@ lpfc_config_port_post(struct lpfc_hba *phba)
602} 619}
603 620
604/** 621/**
622 * lpfc_hba_init_link - Initialize the FC link
623 * @phba: pointer to lpfc hba data structure.
624 *
 625 * This routine issues the INIT_LINK mailbox command.
 626 * It is made available through the lpfc_hba data
 627 * structure for use as a delayed link-up mechanism with the
 628 * module parameter lpfc_suppress_link_up.
629 *
630 * Return code
631 * 0 - success
632 * Any other value - error
633 **/
634int
635lpfc_hba_init_link(struct lpfc_hba *phba)
636{
637 struct lpfc_vport *vport = phba->pport;
638 LPFC_MBOXQ_t *pmb;
639 MAILBOX_t *mb;
640 int rc;
641
642 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
643 if (!pmb) {
644 phba->link_state = LPFC_HBA_ERROR;
645 return -ENOMEM;
646 }
647 mb = &pmb->u.mb;
648 pmb->vport = vport;
649
650 lpfc_init_link(phba, pmb, phba->cfg_topology,
651 phba->cfg_link_speed);
652 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
653 lpfc_set_loopback_flag(phba);
654 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
655 if (rc != MBX_SUCCESS) {
656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
657 "0498 Adapter failed to init, mbxCmd x%x "
658 "INIT_LINK, mbxStatus x%x\n",
659 mb->mbxCommand, mb->mbxStatus);
660 /* Clear all interrupt enable conditions */
661 writel(0, phba->HCregaddr);
662 readl(phba->HCregaddr); /* flush */
663 /* Clear all pending interrupts */
664 writel(0xffffffff, phba->HAregaddr);
665 readl(phba->HAregaddr); /* flush */
666 phba->link_state = LPFC_HBA_ERROR;
667 if (rc != MBX_BUSY)
668 mempool_free(pmb, phba->mbox_mem_pool);
669 return -EIO;
670 }
671 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
672
673 return 0;
674}
675
676/**
 677 * lpfc_hba_down_link - Bring down the FC link
678 *
 679 * This routine issues the DOWN_LINK mailbox command.
 680 * It is made available through the lpfc_hba data
 681 * structure for use in stopping the link.
682 *
683 * Return code
684 * 0 - success
685 * Any other value - error
686 **/
687int
688lpfc_hba_down_link(struct lpfc_hba *phba)
689{
690 LPFC_MBOXQ_t *pmb;
691 int rc;
692
693 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
694 if (!pmb) {
695 phba->link_state = LPFC_HBA_ERROR;
696 return -ENOMEM;
697 }
698
699 lpfc_printf_log(phba,
700 KERN_ERR, LOG_INIT,
701 "0491 Adapter Link is disabled.\n");
702 lpfc_down_link(phba, pmb);
703 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
704 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
705 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
706 lpfc_printf_log(phba,
707 KERN_ERR, LOG_INIT,
708 "2522 Adapter failed to issue DOWN_LINK"
709 " mbox command rc 0x%x\n", rc);
710
711 mempool_free(pmb, phba->mbox_mem_pool);
712 return -EIO;
713 }
714 return 0;
715}
716
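Together these two helpers implement the delayed link-up control path. A hypothetical caller, assuming the LPFC_DELAY_INIT_LINK value added alongside the lpfc_suppress_link_up parameter and illustrative init_requested/down_requested flags:

	if (init_requested &&
	    phba->cfg_suppress_link_up == LPFC_DELAY_INIT_LINK)
		status = lpfc_hba_init_link(phba);	/* bring link up now */
	else if (down_requested)
		status = lpfc_hba_down_link(phba);	/* force link down */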
717/**
605 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 718 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
606 * @phba: pointer to lpfc HBA data structure. 719 * @phba: pointer to lpfc HBA data structure.
607 * 720 *
@@ -645,7 +758,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
645 * down the SLI Layer. 758 * down the SLI Layer.
646 * 759 *
647 * Return codes 760 * Return codes
648 * 0 - sucess. 761 * 0 - success.
649 * Any other value - error. 762 * Any other value - error.
650 **/ 763 **/
651static int 764static int
@@ -700,7 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
700 * down the SLI Layer. 813 * down the SLI Layer.
701 * 814 *
702 * Return codes 815 * Return codes
703 * 0 - sucess. 816 * 0 - success.
704 * Any other value - error. 817 * Any other value - error.
705 **/ 818 **/
706static int 819static int
@@ -710,6 +823,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
710 LIST_HEAD(aborts); 823 LIST_HEAD(aborts);
711 int ret; 824 int ret;
712 unsigned long iflag = 0; 825 unsigned long iflag = 0;
826 struct lpfc_sglq *sglq_entry = NULL;
827
713 ret = lpfc_hba_down_post_s3(phba); 828 ret = lpfc_hba_down_post_s3(phba);
714 if (ret) 829 if (ret)
715 return ret; 830 return ret;
@@ -725,6 +840,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
725 * list. 840 * list.
726 */ 841 */
727 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 842 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
843 list_for_each_entry(sglq_entry,
844 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
845 sglq_entry->state = SGL_FREED;
846
728 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 847 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
729 &phba->sli4_hba.lpfc_sgl_list); 848 &phba->sli4_hba.lpfc_sgl_list);
730 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 849 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -755,7 +874,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
755 * uninitialization after the HBA is reset when bring down the SLI Layer. 874 * uninitialization after the HBA is reset when bring down the SLI Layer.
756 * 875 *
757 * Return codes 876 * Return codes
758 * 0 - sucess. 877 * 0 - success.
759 * Any other value - error. 878 * Any other value - error.
760 **/ 879 **/
761int 880int
@@ -852,12 +971,19 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
852void 971void
853lpfc_hb_timeout_handler(struct lpfc_hba *phba) 972lpfc_hb_timeout_handler(struct lpfc_hba *phba)
854{ 973{
974 struct lpfc_vport **vports;
855 LPFC_MBOXQ_t *pmboxq; 975 LPFC_MBOXQ_t *pmboxq;
856 struct lpfc_dmabuf *buf_ptr; 976 struct lpfc_dmabuf *buf_ptr;
857 int retval; 977 int retval, i;
858 struct lpfc_sli *psli = &phba->sli; 978 struct lpfc_sli *psli = &phba->sli;
859 LIST_HEAD(completions); 979 LIST_HEAD(completions);
860 980
981 vports = lpfc_create_vport_work_array(phba);
982 if (vports != NULL)
983 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
984 lpfc_rcv_seq_check_edtov(vports[i]);
985 lpfc_destroy_vport_work_array(phba, vports);
986
861 if ((phba->link_state == LPFC_HBA_ERROR) || 987 if ((phba->link_state == LPFC_HBA_ERROR) ||
862 (phba->pport->load_flag & FC_UNLOADING) || 988 (phba->pport->load_flag & FC_UNLOADING) ||
863 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 989 (phba->pport->fc_flag & FC_OFFLINE_MODE))
@@ -1254,7 +1380,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1254 * routine from the API jump table function pointer from the lpfc_hba struct. 1380 * routine from the API jump table function pointer from the lpfc_hba struct.
1255 * 1381 *
1256 * Return codes 1382 * Return codes
1257 * 0 - sucess. 1383 * 0 - success.
1258 * Any other value - error. 1384 * Any other value - error.
1259 **/ 1385 **/
1260void 1386void
@@ -1521,10 +1647,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1521 int GE = 0; 1647 int GE = 0;
1522 int oneConnect = 0; /* default is not a oneConnect */ 1648 int oneConnect = 0; /* default is not a oneConnect */
1523 struct { 1649 struct {
1524 char * name; 1650 char *name;
1525 int max_speed; 1651 char *bus;
1526 char * bus; 1652 char *function;
1527 } m = {"<Unknown>", 0, ""}; 1653 } m = {"<Unknown>", "", ""};
1528 1654
1529 if (mdp && mdp[0] != '\0' 1655 if (mdp && mdp[0] != '\0'
1530 && descp && descp[0] != '\0') 1656 && descp && descp[0] != '\0')
@@ -1545,132 +1671,155 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1545 1671
1546 switch (dev_id) { 1672 switch (dev_id) {
1547 case PCI_DEVICE_ID_FIREFLY: 1673 case PCI_DEVICE_ID_FIREFLY:
1548 m = (typeof(m)){"LP6000", max_speed, "PCI"}; 1674 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1549 break; 1675 break;
1550 case PCI_DEVICE_ID_SUPERFLY: 1676 case PCI_DEVICE_ID_SUPERFLY:
1551 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1677 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1552 m = (typeof(m)){"LP7000", max_speed, "PCI"}; 1678 m = (typeof(m)){"LP7000", "PCI",
1679 "Fibre Channel Adapter"};
1553 else 1680 else
1554 m = (typeof(m)){"LP7000E", max_speed, "PCI"}; 1681 m = (typeof(m)){"LP7000E", "PCI",
1682 "Fibre Channel Adapter"};
1555 break; 1683 break;
1556 case PCI_DEVICE_ID_DRAGONFLY: 1684 case PCI_DEVICE_ID_DRAGONFLY:
1557 m = (typeof(m)){"LP8000", max_speed, "PCI"}; 1685 m = (typeof(m)){"LP8000", "PCI",
1686 "Fibre Channel Adapter"};
1558 break; 1687 break;
1559 case PCI_DEVICE_ID_CENTAUR: 1688 case PCI_DEVICE_ID_CENTAUR:
1560 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1689 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1561 m = (typeof(m)){"LP9002", max_speed, "PCI"}; 1690 m = (typeof(m)){"LP9002", "PCI",
1691 "Fibre Channel Adapter"};
1562 else 1692 else
1563 m = (typeof(m)){"LP9000", max_speed, "PCI"}; 1693 m = (typeof(m)){"LP9000", "PCI",
1694 "Fibre Channel Adapter"};
1564 break; 1695 break;
1565 case PCI_DEVICE_ID_RFLY: 1696 case PCI_DEVICE_ID_RFLY:
1566 m = (typeof(m)){"LP952", max_speed, "PCI"}; 1697 m = (typeof(m)){"LP952", "PCI",
1698 "Fibre Channel Adapter"};
1567 break; 1699 break;
1568 case PCI_DEVICE_ID_PEGASUS: 1700 case PCI_DEVICE_ID_PEGASUS:
1569 m = (typeof(m)){"LP9802", max_speed, "PCI-X"}; 1701 m = (typeof(m)){"LP9802", "PCI-X",
1702 "Fibre Channel Adapter"};
1570 break; 1703 break;
1571 case PCI_DEVICE_ID_THOR: 1704 case PCI_DEVICE_ID_THOR:
1572 m = (typeof(m)){"LP10000", max_speed, "PCI-X"}; 1705 m = (typeof(m)){"LP10000", "PCI-X",
1706 "Fibre Channel Adapter"};
1573 break; 1707 break;
1574 case PCI_DEVICE_ID_VIPER: 1708 case PCI_DEVICE_ID_VIPER:
1575 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"}; 1709 m = (typeof(m)){"LPX1000", "PCI-X",
1710 "Fibre Channel Adapter"};
1576 break; 1711 break;
1577 case PCI_DEVICE_ID_PFLY: 1712 case PCI_DEVICE_ID_PFLY:
1578 m = (typeof(m)){"LP982", max_speed, "PCI-X"}; 1713 m = (typeof(m)){"LP982", "PCI-X",
1714 "Fibre Channel Adapter"};
1579 break; 1715 break;
1580 case PCI_DEVICE_ID_TFLY: 1716 case PCI_DEVICE_ID_TFLY:
1581 m = (typeof(m)){"LP1050", max_speed, "PCI-X"}; 1717 m = (typeof(m)){"LP1050", "PCI-X",
1718 "Fibre Channel Adapter"};
1582 break; 1719 break;
1583 case PCI_DEVICE_ID_HELIOS: 1720 case PCI_DEVICE_ID_HELIOS:
1584 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"}; 1721 m = (typeof(m)){"LP11000", "PCI-X2",
1722 "Fibre Channel Adapter"};
1585 break; 1723 break;
1586 case PCI_DEVICE_ID_HELIOS_SCSP: 1724 case PCI_DEVICE_ID_HELIOS_SCSP:
1587 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"}; 1725 m = (typeof(m)){"LP11000-SP", "PCI-X2",
1726 "Fibre Channel Adapter"};
1588 break; 1727 break;
1589 case PCI_DEVICE_ID_HELIOS_DCSP: 1728 case PCI_DEVICE_ID_HELIOS_DCSP:
1590 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"}; 1729 m = (typeof(m)){"LP11002-SP", "PCI-X2",
1730 "Fibre Channel Adapter"};
1591 break; 1731 break;
1592 case PCI_DEVICE_ID_NEPTUNE: 1732 case PCI_DEVICE_ID_NEPTUNE:
1593 m = (typeof(m)){"LPe1000", max_speed, "PCIe"}; 1733 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1594 break; 1734 break;
1595 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1735 case PCI_DEVICE_ID_NEPTUNE_SCSP:
1596 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"}; 1736 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1597 break; 1737 break;
1598 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1738 case PCI_DEVICE_ID_NEPTUNE_DCSP:
1599 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"}; 1739 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1600 break; 1740 break;
1601 case PCI_DEVICE_ID_BMID: 1741 case PCI_DEVICE_ID_BMID:
1602 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"}; 1742 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1603 break; 1743 break;
1604 case PCI_DEVICE_ID_BSMB: 1744 case PCI_DEVICE_ID_BSMB:
1605 m = (typeof(m)){"LP111", max_speed, "PCI-X2"}; 1745 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1606 break; 1746 break;
1607 case PCI_DEVICE_ID_ZEPHYR: 1747 case PCI_DEVICE_ID_ZEPHYR:
1608 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1748 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1609 break; 1749 break;
1610 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1750 case PCI_DEVICE_ID_ZEPHYR_SCSP:
1611 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1751 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1612 break; 1752 break;
1613 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1753 case PCI_DEVICE_ID_ZEPHYR_DCSP:
1614 m = (typeof(m)){"LP2105", max_speed, "PCIe"}; 1754 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1615 GE = 1; 1755 GE = 1;
1616 break; 1756 break;
1617 case PCI_DEVICE_ID_ZMID: 1757 case PCI_DEVICE_ID_ZMID:
1618 m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; 1758 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1619 break; 1759 break;
1620 case PCI_DEVICE_ID_ZSMB: 1760 case PCI_DEVICE_ID_ZSMB:
1621 m = (typeof(m)){"LPe111", max_speed, "PCIe"}; 1761 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1622 break; 1762 break;
1623 case PCI_DEVICE_ID_LP101: 1763 case PCI_DEVICE_ID_LP101:
1624 m = (typeof(m)){"LP101", max_speed, "PCI-X"}; 1764 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1625 break; 1765 break;
1626 case PCI_DEVICE_ID_LP10000S: 1766 case PCI_DEVICE_ID_LP10000S:
1627 m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; 1767 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1628 break; 1768 break;
1629 case PCI_DEVICE_ID_LP11000S: 1769 case PCI_DEVICE_ID_LP11000S:
1630 m = (typeof(m)){"LP11000-S", max_speed, 1770 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1631 "PCI-X2"};
1632 break; 1771 break;
1633 case PCI_DEVICE_ID_LPE11000S: 1772 case PCI_DEVICE_ID_LPE11000S:
1634 m = (typeof(m)){"LPe11000-S", max_speed, 1773 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1635 "PCIe"};
1636 break; 1774 break;
1637 case PCI_DEVICE_ID_SAT: 1775 case PCI_DEVICE_ID_SAT:
1638 m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; 1776 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1639 break; 1777 break;
1640 case PCI_DEVICE_ID_SAT_MID: 1778 case PCI_DEVICE_ID_SAT_MID:
1641 m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; 1779 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1642 break; 1780 break;
1643 case PCI_DEVICE_ID_SAT_SMB: 1781 case PCI_DEVICE_ID_SAT_SMB:
1644 m = (typeof(m)){"LPe121", max_speed, "PCIe"}; 1782 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1645 break; 1783 break;
1646 case PCI_DEVICE_ID_SAT_DCSP: 1784 case PCI_DEVICE_ID_SAT_DCSP:
1647 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; 1785 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1648 break; 1786 break;
1649 case PCI_DEVICE_ID_SAT_SCSP: 1787 case PCI_DEVICE_ID_SAT_SCSP:
1650 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; 1788 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1651 break; 1789 break;
1652 case PCI_DEVICE_ID_SAT_S: 1790 case PCI_DEVICE_ID_SAT_S:
1653 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; 1791 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1654 break; 1792 break;
1655 case PCI_DEVICE_ID_HORNET: 1793 case PCI_DEVICE_ID_HORNET:
1656 m = (typeof(m)){"LP21000", max_speed, "PCIe"}; 1794 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1657 GE = 1; 1795 GE = 1;
1658 break; 1796 break;
1659 case PCI_DEVICE_ID_PROTEUS_VF: 1797 case PCI_DEVICE_ID_PROTEUS_VF:
1660 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; 1798 m = (typeof(m)){"LPev12000", "PCIe IOV",
1799 "Fibre Channel Adapter"};
1661 break; 1800 break;
1662 case PCI_DEVICE_ID_PROTEUS_PF: 1801 case PCI_DEVICE_ID_PROTEUS_PF:
1663 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; 1802 m = (typeof(m)){"LPev12000", "PCIe IOV",
1803 "Fibre Channel Adapter"};
1664 break; 1804 break;
1665 case PCI_DEVICE_ID_PROTEUS_S: 1805 case PCI_DEVICE_ID_PROTEUS_S:
1666 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1806 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1807 "Fibre Channel Adapter"};
1667 break; 1808 break;
1668 case PCI_DEVICE_ID_TIGERSHARK: 1809 case PCI_DEVICE_ID_TIGERSHARK:
1669 oneConnect = 1; 1810 oneConnect = 1;
1670 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; 1811 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1812 break;
1813 case PCI_DEVICE_ID_TOMCAT:
1814 oneConnect = 1;
1815 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1816 break;
1817 case PCI_DEVICE_ID_FALCON:
1818 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1819 "EmulexSecure Fibre"};
1671 break; 1820 break;
1672 default: 1821 default:
1673 m = (typeof(m)){ NULL }; 1822 m = (typeof(m)){"Unknown", "", ""};
1674 break; 1823 break;
1675 } 1824 }
1676 1825
@@ -1682,17 +1831,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1682 if (descp && descp[0] == '\0') { 1831 if (descp && descp[0] == '\0') {
1683 if (oneConnect) 1832 if (oneConnect)
1684 snprintf(descp, 255, 1833 snprintf(descp, 255,
1685 "Emulex OneConnect %s, FCoE Initiator, Port %s", 1834 "Emulex OneConnect %s, %s Initiator, Port %s",
1686 m.name, 1835 m.name, m.function,
1687 phba->Port); 1836 phba->Port);
1688 else 1837 else
1689 snprintf(descp, 255, 1838 snprintf(descp, 255,
1690 "Emulex %s %d%s %s %s", 1839 "Emulex %s %d%s %s %s",
1691 m.name, m.max_speed, 1840 m.name, max_speed, (GE) ? "GE" : "Gb",
1692 (GE) ? "GE" : "Gb", 1841 m.bus, m.function);
1693 m.bus,
1694 (GE) ? "FCoE Adapter" :
1695 "Fibre Channel Adapter");
1696 } 1842 }
1697} 1843}
1698 1844
@@ -2045,6 +2191,46 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
2045} 2191}
2046 2192
2047/** 2193/**
2194 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2195 * @phba: pointer to lpfc hba data structure.
2196 *
2197 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2198 * caller of this routine should already hold the host lock.
2199 **/
2200void
2201__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2202{
2203 /* Clear pending FCF rediscovery wait and failover in progress flags */
2204 phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2205 FCF_DEAD_DISC |
2206 FCF_ACVL_DISC);
2207 /* Now, try to stop the timer */
2208 del_timer(&phba->fcf.redisc_wait);
2209}
2210
2211/**
2212 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2213 * @phba: pointer to lpfc hba data structure.
2214 *
2215 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2216 * checks, with the host lock held, whether the FCF rediscovery wait
2217 * timer is pending before disabling the timer and clearing the wait
2218 * timer pending flag.
2219 **/
2220void
2221lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2222{
2223 spin_lock_irq(&phba->hbalock);
2224 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2225 /* FCF rediscovery timer already fired or stopped */
2226 spin_unlock_irq(&phba->hbalock);
2227 return;
2228 }
2229 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2230 spin_unlock_irq(&phba->hbalock);
2231}
2232
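The two routines above follow the usual kernel locked-helper convention: the double-underscore variant assumes the caller already holds phba->hbalock, while the public wrapper takes the lock, re-checks FCF_REDISC_PEND, and only then calls the helper. A minimal user-space sketch of that convention, with a pthread mutex standing in for the hba lock; all names here are illustrative, not from the driver:

/* Sketch of the __locked-helper/wrapper split used above. */
#include <pthread.h>
#include <stdio.h>

#define REDISC_PEND 0x01   /* made-up stand-in for FCF_REDISC_PEND */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int flags = REDISC_PEND;

/* Caller must hold 'lock', mirroring the __lpfc_... variant. */
static void __stop_wait_timer(void)
{
        flags &= ~REDISC_PEND;          /* clear the pending state */
        /* del_timer(...) would go here in the real driver */
}

/* Public wrapper: take the lock, bail out if the timer already fired. */
static void stop_wait_timer(void)
{
        pthread_mutex_lock(&lock);
        if (!(flags & REDISC_PEND)) {
                pthread_mutex_unlock(&lock);
                return;
        }
        __stop_wait_timer();
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        stop_wait_timer();
        printf("flags after stop: 0x%x\n", flags);
        return 0;
}

The split lets other code paths that already hold the lock clear the flags without a self-deadlock, while external callers get the checked, locking entry point.
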
2233/**
2048 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2234 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2049 * @phba: pointer to lpfc hba data structure. 2235 * @phba: pointer to lpfc hba data structure.
2050 * 2236 *
@@ -2068,6 +2254,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
2068 break; 2254 break;
2069 case LPFC_PCI_DEV_OC: 2255 case LPFC_PCI_DEV_OC:
2070 /* Stop any OneConnect device specific driver timers */ 2256 /* Stop any OneConnect device specific driver timers */
2257 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2071 break; 2258 break;
2072 default: 2259 default:
2073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2260 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2200,6 +2387,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2200 struct lpfc_vport *vport = phba->pport; 2387 struct lpfc_vport *vport = phba->pport;
2201 struct lpfc_nodelist *ndlp, *next_ndlp; 2388 struct lpfc_nodelist *ndlp, *next_ndlp;
2202 struct lpfc_vport **vports; 2389 struct lpfc_vport **vports;
2390 struct Scsi_Host *shost;
2203 int i; 2391 int i;
2204 2392
2205 if (vport->fc_flag & FC_OFFLINE_MODE) 2393 if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -2213,11 +2401,15 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2213 vports = lpfc_create_vport_work_array(phba); 2401 vports = lpfc_create_vport_work_array(phba);
2214 if (vports != NULL) { 2402 if (vports != NULL) {
2215 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2403 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2216 struct Scsi_Host *shost;
2217
2218 if (vports[i]->load_flag & FC_UNLOADING) 2404 if (vports[i]->load_flag & FC_UNLOADING)
2219 continue; 2405 continue;
2220 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; 2406 shost = lpfc_shost_from_vport(vports[i]);
2407 spin_lock_irq(shost->host_lock);
2408 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2409 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2410 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2411 spin_unlock_irq(shost->host_lock);
2412
2221 shost = lpfc_shost_from_vport(vports[i]); 2413 shost = lpfc_shost_from_vport(vports[i]);
2222 list_for_each_entry_safe(ndlp, next_ndlp, 2414 list_for_each_entry_safe(ndlp, next_ndlp,
2223 &vports[i]->fc_nodes, 2415 &vports[i]->fc_nodes,
@@ -2308,6 +2500,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2308 2500
2309 spin_lock_irq(&phba->hbalock); 2501 spin_lock_irq(&phba->hbalock);
2310 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2502 /* Release all the lpfc_scsi_bufs maintained by this host. */
2503 spin_lock(&phba->scsi_buf_list_lock);
2311 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2504 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2312 list_del(&sb->list); 2505 list_del(&sb->list);
2313 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2506 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
@@ -2315,6 +2508,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2315 kfree(sb); 2508 kfree(sb);
2316 phba->total_scsi_bufs--; 2509 phba->total_scsi_bufs--;
2317 } 2510 }
2511 spin_unlock(&phba->scsi_buf_list_lock);
2318 2512
2319 /* Release all the lpfc_iocbq entries maintained by this host. */ 2513 /* Release all the lpfc_iocbq entries maintained by this host. */
2320 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2514 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2322,9 +2516,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2322 kfree(io); 2516 kfree(io);
2323 phba->total_iocbq_bufs--; 2517 phba->total_iocbq_bufs--;
2324 } 2518 }
2325
2326 spin_unlock_irq(&phba->hbalock); 2519 spin_unlock_irq(&phba->hbalock);
2327
2328 return 0; 2520 return 0;
2329} 2521}
2330 2522
@@ -2373,7 +2565,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2373 shost->this_id = -1; 2565 shost->this_id = -1;
2374 shost->max_cmd_len = 16; 2566 shost->max_cmd_len = 16;
2375 if (phba->sli_rev == LPFC_SLI_REV4) { 2567 if (phba->sli_rev == LPFC_SLI_REV4) {
2376 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; 2568 shost->dma_boundary =
2569 phba->sli4_hba.pc_sli4_params.sge_supp_len;
2377 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2570 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2378 } 2571 }
2379 2572
@@ -2407,8 +2600,16 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2407 init_timer(&vport->els_tmofunc); 2600 init_timer(&vport->els_tmofunc);
2408 vport->els_tmofunc.function = lpfc_els_timeout; 2601 vport->els_tmofunc.function = lpfc_els_timeout;
2409 vport->els_tmofunc.data = (unsigned long)vport; 2602 vport->els_tmofunc.data = (unsigned long)vport;
2603 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
2604 phba->menlo_flag |= HBA_MENLO_SUPPORT;
2605 /* check for menlo minimum sg count */
2606 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
2607 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
2608 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2609 }
2610 }
2410 2611
2411 error = scsi_add_host(shost, dev); 2612 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2412 if (error) 2613 if (error)
2413 goto out_put_shost; 2614 goto out_put_shost;
2414 2615
@@ -2622,8 +2823,6 @@ lpfc_stop_port_s4(struct lpfc_hba *phba)
2622 lpfc_stop_hba_timers(phba); 2823 lpfc_stop_hba_timers(phba);
2623 phba->pport->work_port_events = 0; 2824 phba->pport->work_port_events = 0;
2624 phba->sli4_hba.intr_enable = 0; 2825 phba->sli4_hba.intr_enable = 0;
2625 /* Hard clear it for now, shall have more graceful way to wait later */
2626 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2627} 2826}
2628 2827
2629/** 2828/**
@@ -2675,7 +2874,7 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2675 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; 2874 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2676 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); 2875 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2677 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, 2876 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2678 phba->fcf.fcf_indx); 2877 phba->fcf.current_rec.fcf_indx);
2679 2878
2680 if (!phba->sli4_hba.intr_enable) 2879 if (!phba->sli4_hba.intr_enable)
2681 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 2880 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -2699,6 +2898,117 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2699} 2898}
2700 2899
2701/** 2900/**
2901 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2902 * @phba: Pointer to hba for which this call is being executed.
2903 *
2904 * This routine starts the timer waiting for the FCF rediscovery to complete.
2905 **/
2906void
2907lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2908{
2909 unsigned long fcf_redisc_wait_tmo =
2910 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2911 /* Start fcf rediscovery wait period timer */
2912 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2913 spin_lock_irq(&phba->hbalock);
2914 /* Allow action on new FCF asynchronous event */
2915 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2916 /* Mark the FCF rediscovery pending state */
2917 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2918 spin_unlock_irq(&phba->hbalock);
2919}
2920
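The deadline handed to mod_timer() above is absolute: the current jiffies value plus the rediscovery wait period converted to ticks. A small stand-alone sketch of that arithmetic; HZ and the 2s timeout are assumed values for illustration, and the real msecs_to_jiffies likewise rounds up to whole ticks:

/* Sketch of the deadline arithmetic in lpfc_fcf_redisc_wait_start_timer. */
#include <stdio.h>

#define HZ 250                          /* assumed tick rate */
#define FCF_REDISC_WAIT_TMO_MS 2000     /* assumed 2s quiescent period */

static unsigned long msecs_to_ticks(unsigned long ms)
{
        return (ms * HZ + 999) / 1000;  /* round up to whole ticks */
}

int main(void)
{
        unsigned long now = 123456;     /* stand-in for jiffies */
        unsigned long expires = now + msecs_to_ticks(FCF_REDISC_WAIT_TMO_MS);

        printf("timer expires at tick %lu (%lu ticks from now)\n",
               expires, expires - now);
        return 0;
}
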
2921/**
2922 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
2923 * @ptr: Map to lpfc_hba data structure pointer.
2924 *
2925 * This routine is invoked when waiting for FCF table rediscover has been
2926 * timed out. If new FCF record(s) has (have) been discovered during the
2927 * wait period, a new FCF event shall be added to the FCOE async event
2928 * list, and then worker thread shall be waked up for processing from the
2929 * worker thread context.
2930 **/
2931void
2932lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2933{
2934 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2935
2936 /* Don't send FCF rediscovery event if timer cancelled */
2937 spin_lock_irq(&phba->hbalock);
2938 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2939 spin_unlock_irq(&phba->hbalock);
2940 return;
2941 }
2942 /* Clear FCF rediscovery timer pending flag */
2943 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2944 /* FCF rediscovery event to worker thread */
2945 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2946 spin_unlock_irq(&phba->hbalock);
2947 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2948 "2776 FCF rediscover wait timer expired, post "
2949 "a worker thread event for FCF table scan\n");
2950 /* wake up worker thread */
2951 lpfc_worker_wake_up(phba);
2952}
2953
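The timeout handler above also shows how a cancelled timer is detected: if FCF_REDISC_PEND has already been cleared under the lock, the handler returns without queuing work; otherwise it converts the pending bit into FCF_REDISC_EVT and wakes the worker. A sketch of that flag handoff; the bit values are made up and the lock is elided:

/* Sketch of the pending->event handoff in the timeout handler above. */
#include <stdio.h>

#define REDISC_PEND 0x01   /* timer armed, not yet fired */
#define REDISC_EVT  0x02   /* event queued for the worker thread */

static unsigned int fcf_flag = REDISC_PEND;

/* Returns 1 if the worker should be woken, 0 if the timer was cancelled. */
static int redisc_wait_tmo(void)
{
        /* In the driver this block runs under phba->hbalock. */
        if (!(fcf_flag & REDISC_PEND))
                return 0;               /* timer was stopped first */
        fcf_flag &= ~REDISC_PEND;
        fcf_flag |= REDISC_EVT;         /* hand off to the worker thread */
        return 1;
}

int main(void)
{
        if (redisc_wait_tmo())
                printf("wake worker, fcf_flag=0x%x\n", fcf_flag);
        return 0;
}
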
2954/**
2955 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2956 * @phba: pointer to lpfc hba data structure.
2957 *
2958 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2959 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2960 * was successful and the firmware supports FCoE. Any other return indicates
2961 * an error. It is assumed that this function will be called before interrupts
2962 * are enabled.
2963 **/
2964static int
2965lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2966{
2967 int rc = 0;
2968 LPFC_MBOXQ_t *mboxq;
2969 struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2970 uint32_t length;
2971 uint32_t shdr_status, shdr_add_status;
2972
2973 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2974 if (!mboxq) {
2975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2976 "2621 Failed to allocate mbox for "
2977 "query firmware config cmd\n");
2978 return -ENOMEM;
2979 }
2980 query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2981 length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2982 sizeof(struct lpfc_sli4_cfg_mhdr));
2983 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2984 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2985 length, LPFC_SLI4_MBX_EMBED);
2986 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2987 /* The IOCTL status is embedded in the mailbox subheader. */
2988 shdr_status = bf_get(lpfc_mbox_hdr_status,
2989 &query_fw_cfg->header.cfg_shdr.response);
2990 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2991 &query_fw_cfg->header.cfg_shdr.response);
2992 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2993 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2994 "2622 Query Firmware Config failed "
2995 "mbx status x%x, status x%x add_status x%x\n",
2996 rc, shdr_status, shdr_add_status);
2997 return -EINVAL;
2998 }
2999 if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
3000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3001 "2623 FCoE Function not supported by firmware. "
3002 "Function mode = %08x\n",
3003 query_fw_cfg->function_mode);
3004 return -EINVAL;
3005 }
3006 if (rc != MBX_TIMEOUT)
3007 mempool_free(mboxq, phba->mbox_mem_pool);
3008 return 0;
3009}
3010
3011/**
2702 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3012 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2703 * @phba: pointer to lpfc hba data structure. 3013 * @phba: pointer to lpfc hba data structure.
2704 * @acqe_link: pointer to the async link completion queue entry. 3014 * @acqe_link: pointer to the async link completion queue entry.
@@ -2893,6 +3203,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2893 bf_get(lpfc_acqe_link_physical, acqe_link); 3203 bf_get(lpfc_acqe_link_physical, acqe_link);
2894 phba->sli4_hba.link_state.fault = 3204 phba->sli4_hba.link_state.fault =
2895 bf_get(lpfc_acqe_link_fault, acqe_link); 3205 bf_get(lpfc_acqe_link_fault, acqe_link);
3206 phba->sli4_hba.link_state.logical_speed =
3207 bf_get(lpfc_acqe_qos_link_speed, acqe_link);
2896 3208
2897 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3209 /* Invoke the lpfc_handle_latt mailbox command callback function */
2898 lpfc_mbx_cmpl_read_la(phba, pmb); 3210 lpfc_mbx_cmpl_read_la(phba, pmb);
@@ -2906,6 +3218,68 @@ out_free_pmb:
2906} 3218}
2907 3219
2908/** 3220/**
3221 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3222 * @vport: pointer to vport data structure.
3223 *
3224 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3225 * response to a CVL event.
3226 *
3227 * Return the pointer to the ndlp with the vport if successful, otherwise
3228 * return NULL.
3229 **/
3230static struct lpfc_nodelist *
3231lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3232{
3233 struct lpfc_nodelist *ndlp;
3234 struct Scsi_Host *shost;
3235 struct lpfc_hba *phba;
3236
3237 if (!vport)
3238 return NULL;
3239 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3240 if (!ndlp)
3241 return NULL;
3242 phba = vport->phba;
3243 if (!phba)
3244 return NULL;
3245 if (phba->pport->port_state <= LPFC_FLOGI)
3246 return NULL;
3247 /* If virtual link is not yet instantiated ignore CVL */
3248 if (vport->port_state <= LPFC_FDISC)
3249 return NULL;
3250 shost = lpfc_shost_from_vport(vport);
3251 if (!shost)
3252 return NULL;
3253 lpfc_linkdown_port(vport);
3254 lpfc_cleanup_pending_mbox(vport);
3255 spin_lock_irq(shost->host_lock);
3256 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3257 spin_unlock_irq(shost->host_lock);
3258
3259 return ndlp;
3260}
3261
3262/**
3263 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3264 * @phba: pointer to lpfc hba data structure.
3265 *
3266 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3267 * response to a FCF dead event.
3268 **/
3269static void
3270lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3271{
3272 struct lpfc_vport **vports;
3273 int i;
3274
3275 vports = lpfc_create_vport_work_array(phba);
3276 if (vports)
3277 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3278 lpfc_sli4_perform_vport_cvl(vports[i]);
3279 lpfc_destroy_vport_work_array(phba, vports);
3280}
3281
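Both CVL helpers above walk the array returned by lpfc_create_vport_work_array(), which is bounded by max_vports and NULL-terminated, so iteration stops at whichever comes first. A stand-alone sketch of that traversal pattern; the struct and names are illustrative only:

/* Sketch of the bounded, NULL-terminated vport work-array walk. */
#include <stdio.h>
#include <stddef.h>

#define MAX_VPORTS 4

struct vport { int id; };

static void visit(struct vport *v)
{
        printf("visiting vport %d\n", v->id);
}

int main(void)
{
        struct vport a = {0}, b = {1};
        /* NULL-terminated, like lpfc_create_vport_work_array's result */
        struct vport *vports[MAX_VPORTS + 1] = { &a, &b, NULL };
        int i;

        for (i = 0; i <= MAX_VPORTS && vports[i] != NULL; i++)
                visit(vports[i]);
        return 0;
}
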
3282/**
2909 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3283 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2910 * @phba: pointer to lpfc hba data structure. 3284 * @phba: pointer to lpfc hba data structure.
2911 * @acqe_fcoe: pointer to the async fcoe completion queue entry. 3285 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
@@ -2918,33 +3292,71 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2918{ 3292{
2919 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 3293 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2920 int rc; 3294 int rc;
3295 struct lpfc_vport *vport;
3296 struct lpfc_nodelist *ndlp;
3297 struct Scsi_Host *shost;
3298 int active_vlink_present;
3299 struct lpfc_vport **vports;
3300 int i;
2921 3301
3302 phba->fc_eventTag = acqe_fcoe->event_tag;
2922 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3303 phba->fcoe_eventtag = acqe_fcoe->event_tag;
2923 switch (event_type) { 3304 switch (event_type) {
2924 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3305 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2925 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3306 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
2926 "2546 New FCF found index 0x%x tag 0x%x\n", 3307 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
2927 acqe_fcoe->fcf_index, 3308 "2546 New FCF found/FCF parameter modified event: "
2928 acqe_fcoe->event_tag); 3309 "evt_tag:x%x, fcf_index:x%x\n",
2929 /* 3310 acqe_fcoe->event_tag, acqe_fcoe->index);
2930 * If the current FCF is in discovered state, or 3311
2931 * FCF discovery is in progress do nothing.
2932 */
2933 spin_lock_irq(&phba->hbalock); 3312 spin_lock_irq(&phba->hbalock);
2934 if ((phba->fcf.fcf_flag & FCF_DISCOVERED) || 3313 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
2935 (phba->hba_flag & FCF_DISC_INPROGRESS)) { 3314 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3315 /*
3316 * If the current FCF is in discovered state or
3317 * FCF discovery is in progress, do nothing.
3318 */
3319 spin_unlock_irq(&phba->hbalock);
3320 break;
3321 }
3322
3323 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3324 /*
3325 * If a fast FCF failover rescan event is pending,
3326 * do nothing.
3327 */
2936 spin_unlock_irq(&phba->hbalock); 3328 spin_unlock_irq(&phba->hbalock);
2937 break; 3329 break;
2938 } 3330 }
2939 spin_unlock_irq(&phba->hbalock); 3331 spin_unlock_irq(&phba->hbalock);
2940 3332
2941 /* Read the FCF table and re-discover SAN. */ 3333 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
2942 rc = lpfc_sli4_read_fcf_record(phba, 3334 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2943 LPFC_FCOE_FCF_GET_FIRST); 3335 /*
3336 * During the period of FCF discovery, read the FCF
3337 * table record indexed by the event to update the
3338 * FCF round robin failover eligible FCF bmask.
3339 */
3340 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3341 LOG_DISCOVERY,
3342 "2779 Read new FCF record with "
3343 "fcf_index:x%x for updating FCF "
3344 "round robin failover bmask\n",
3345 acqe_fcoe->index);
3346 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3347 }
3348
3349 /* Otherwise, scan the entire FCF table and re-discover SAN */
3350 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3351 "2770 Start FCF table scan due to new FCF "
3352 "event: evt_tag:x%x, fcf_index:x%x\n",
3353 acqe_fcoe->event_tag, acqe_fcoe->index);
3354 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3355 LPFC_FCOE_FCF_GET_FIRST);
2944 if (rc) 3356 if (rc)
2945 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3357 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
2946 "2547 Read FCF record failed 0x%x\n", 3358 "2547 Issue FCF scan read FCF mailbox "
2947 rc); 3359 "command failed 0x%x\n", rc);
2948 break; 3360 break;
2949 3361
2950 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3362 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -2955,22 +3367,130 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2955 break; 3367 break;
2956 3368
2957 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3369 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2958 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3370 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
2959 "2549 FCF disconnected fron network index 0x%x" 3371 "2549 FCF disconnected from network index 0x%x"
2960 " tag 0x%x\n", acqe_fcoe->fcf_index, 3372 " tag 0x%x\n", acqe_fcoe->index,
2961 acqe_fcoe->event_tag); 3373 acqe_fcoe->event_tag);
2962 /* If the event is not for currently used fcf do nothing */ 3374 /* If the event is not for currently used fcf do nothing */
2963 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) 3375 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
2964 break; 3376 break;
2965 /* 3377 /* We request port to rediscover the entire FCF table for
2966 * Currently, driver support only one FCF - so treat this as 3378 * a fast recovery in case the current FCF record
2967 * a link down. 3379 * is no longer valid, if we are not in the middle of the
3380 * FCF failover process already.
2968 */ 3381 */
2969 lpfc_linkdown(phba); 3382 spin_lock_irq(&phba->hbalock);
2970 /* Unregister FCF if no devices connected to it */ 3383 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
2971 lpfc_unregister_unused_fcf(phba); 3384 spin_unlock_irq(&phba->hbalock);
3385 /* Update FLOGI FCF failover eligible FCF bmask */
3386 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3387 break;
3388 }
3389 /* Mark the fast failover process in progress */
3390 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3391 spin_unlock_irq(&phba->hbalock);
3392 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3393 "2771 Start FCF fast failover process due to "
3394 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3395 "\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3396 rc = lpfc_sli4_redisc_fcf_table(phba);
3397 if (rc) {
3398 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3399 LOG_DISCOVERY,
3400 "2772 Issue FCF rediscover mabilbox "
3401 "command failed, fail through to FCF "
3402 "dead event\n");
3403 spin_lock_irq(&phba->hbalock);
3404 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3405 spin_unlock_irq(&phba->hbalock);
3406 /*
3407 * Last resort will fail over by treating this
3408 * as a link down to FCF registration.
3409 */
3410 lpfc_sli4_fcf_dead_failthrough(phba);
3411 } else
3412 /* Handling fast FCF failover to a DEAD FCF event
3413 * is considered equivalent to receiving CVL to all
3414 * vports.
3415 */
3416 lpfc_sli4_perform_all_vport_cvl(phba);
2972 break; 3417 break;
3418 case LPFC_FCOE_EVENT_TYPE_CVL:
3419 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3420 "2718 Clear Virtual Link Received for VPI 0x%x"
3421 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3422 vport = lpfc_find_vport_by_vpid(phba,
3423 acqe_fcoe->index - phba->vpi_base);
3424 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3425 if (!ndlp)
3426 break;
3427 active_vlink_present = 0;
3428
3429 vports = lpfc_create_vport_work_array(phba);
3430 if (vports) {
3431 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3432 i++) {
3433 if ((!(vports[i]->fc_flag &
3434 FC_VPORT_CVL_RCVD)) &&
3435 (vports[i]->port_state > LPFC_FDISC)) {
3436 active_vlink_present = 1;
3437 break;
3438 }
3439 }
3440 lpfc_destroy_vport_work_array(phba, vports);
3441 }
2973 3442
3443 if (active_vlink_present) {
3444 /*
3445 * If there are other active VLinks present,
3446 * re-instantiate the Vlink using FDISC.
3447 */
3448 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3449 shost = lpfc_shost_from_vport(vport);
3450 spin_lock_irq(shost->host_lock);
3451 ndlp->nlp_flag |= NLP_DELAY_TMO;
3452 spin_unlock_irq(shost->host_lock);
3453 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3454 vport->port_state = LPFC_FDISC;
3455 } else {
3456 /*
3457 * Otherwise, request the port to rediscover
3458 * the entire FCF table for a fast recovery,
3459 * in case the current FCF is no longer
3460 * valid, provided we are not already in
3461 * the FCF failover process.
3462 */
3463 spin_lock_irq(&phba->hbalock);
3464 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3465 spin_unlock_irq(&phba->hbalock);
3466 break;
3467 }
3468 /* Mark the fast failover process in progress */
3469 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3470 spin_unlock_irq(&phba->hbalock);
3471 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3472 LOG_DISCOVERY,
3473 "2773 Start FCF fast failover due "
3474 "to CVL event: evt_tag:x%x\n",
3475 acqe_fcoe->event_tag);
3476 rc = lpfc_sli4_redisc_fcf_table(phba);
3477 if (rc) {
3478 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3479 LOG_DISCOVERY,
3480 "2774 Issue FCF rediscover "
3481 "mabilbox command failed, "
3482 "through to CVL event\n");
3483 spin_lock_irq(&phba->hbalock);
3484 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3485 spin_unlock_irq(&phba->hbalock);
3486 /*
3487 * Last resort will be to retry on
3488 * the currently registered FCF entry.
3489 */
3490 lpfc_retry_pport_discovery(phba);
3491 }
3492 }
3493 break;
2974 default: 3494 default:
2975 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3495 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2976 "0288 Unknown FCoE event type 0x%x event tag " 3496 "0288 Unknown FCoE event type 0x%x event tag "
@@ -2990,6 +3510,7 @@ static void
2990lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 3510lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2991 struct lpfc_acqe_dcbx *acqe_dcbx) 3511 struct lpfc_acqe_dcbx *acqe_dcbx)
2992{ 3512{
3513 phba->fc_eventTag = acqe_dcbx->event_tag;
2993 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3514 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2994 "0290 The SLI4 DCBX asynchronous event is not " 3515 "0290 The SLI4 DCBX asynchronous event is not "
2995 "handled yet\n"); 3516 "handled yet\n");
@@ -3044,6 +3565,37 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3044} 3565}
3045 3566
3046/** 3567/**
3568 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3569 * @phba: pointer to lpfc hba data structure.
3570 *
3571 * This routine is invoked by the worker thread to process FCF table
3572 * rediscovery pending completion event.
3573 **/
3574void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3575{
3576 int rc;
3577
3578 spin_lock_irq(&phba->hbalock);
3579 /* Clear FCF rediscovery timeout event */
3580 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3581 /* Clear driver fast failover FCF record flag */
3582 phba->fcf.failover_rec.flag = 0;
3583 /* Set state for FCF fast failover */
3584 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3585 spin_unlock_irq(&phba->hbalock);
3586
3587 /* Scan FCF table from the first entry to re-discover SAN */
3588 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3589 "2777 Start FCF table scan after FCF "
3590 "rediscovery quiescent period over\n");
3591 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3592 if (rc)
3593 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3594 "2747 Issue FCF scan read FCF mailbox "
3595 "command failed 0x%x\n", rc);
3596}
3597
3598/**
3047 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 3599 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3048 * @phba: pointer to lpfc hba data structure. 3600 * @phba: pointer to lpfc hba data structure.
3049 * @dev_grp: The HBA PCI-Device group number. 3601 * @dev_grp: The HBA PCI-Device group number.
@@ -3124,7 +3676,7 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3124 * PCI devices. 3676 * PCI devices.
3125 * 3677 *
3126 * Return codes 3678 * Return codes
3127 * 0 - sucessful 3679 * 0 - successful
3128 * other values - error 3680 * other values - error
3129 **/ 3681 **/
3130static int 3682static int
@@ -3220,7 +3772,7 @@ lpfc_reset_hba(struct lpfc_hba *phba)
3220 * support the SLI-3 HBA device it attached to. 3772 * support the SLI-3 HBA device it attached to.
3221 * 3773 *
3222 * Return codes 3774 * Return codes
3223 * 0 - sucessful 3775 * 0 - successful
3224 * other values - error 3776 * other values - error
3225 **/ 3777 **/
3226static int 3778static int
@@ -3321,15 +3873,18 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3321 * support the SLI-4 HBA device it attached to. 3873 * support the SLI-4 HBA device it attached to.
3322 * 3874 *
3323 * Return codes 3875 * Return codes
3324 * 0 - sucessful 3876 * 0 - successful
3325 * other values - error 3877 * other values - error
3326 **/ 3878 **/
3327static int 3879static int
3328lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 3880lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3329{ 3881{
3330 struct lpfc_sli *psli; 3882 struct lpfc_sli *psli;
3331 int rc; 3883 LPFC_MBOXQ_t *mboxq;
3332 int i, hbq_count; 3884 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3885 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3886 struct lpfc_mqe *mqe;
3887 int longs;
3333 3888
3334 /* Before proceed, wait for POST done and device ready */ 3889 /* Before proceed, wait for POST done and device ready */
3335 rc = lpfc_sli4_post_status_check(phba); 3890 rc = lpfc_sli4_post_status_check(phba);
@@ -3358,6 +3913,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3358 init_timer(&phba->eratt_poll); 3913 init_timer(&phba->eratt_poll);
3359 phba->eratt_poll.function = lpfc_poll_eratt; 3914 phba->eratt_poll.function = lpfc_poll_eratt;
3360 phba->eratt_poll.data = (unsigned long) phba; 3915 phba->eratt_poll.data = (unsigned long) phba;
3916 /* FCF rediscover timer */
3917 init_timer(&phba->fcf.redisc_wait);
3918 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3919 phba->fcf.redisc_wait.data = (unsigned long)phba;
3920
3361 /* 3921 /*
3362 * We need to do a READ_CONFIG mailbox command here before 3922 * We need to do a READ_CONFIG mailbox command here before
3363 * calling lpfc_get_cfgparam. For VFs this will report the 3923 * calling lpfc_get_cfgparam. For VFs this will report the
@@ -3382,31 +3942,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3382 * used to create the sg_dma_buf_pool must be dynamically calculated. 3942 * used to create the sg_dma_buf_pool must be dynamically calculated.
3383 * 2 segments are added since the IOCB needs a command and response bde. 3943 * 2 segments are added since the IOCB needs a command and response bde.
3384 * To ensure that the scsi sgl does not cross a 4k page boundary only 3944 * To ensure that the scsi sgl does not cross a 4k page boundary only
3385 * sgl sizes of 1k, 2k, 4k, and 8k are supported. 3945 * sgl sizes must be a power of 2.
3386 * Table of sgl sizes and seg_cnt:
3387 * sgl size, sg_seg_cnt total seg
3388 * 1k 50 52
3389 * 2k 114 116
3390 * 4k 242 244
3391 * 8k 498 500
3392 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3393 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3394 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3395 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3396 */ 3946 */
3397 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT) 3947 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
3398 phba->cfg_sg_seg_cnt = 50; 3948 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
3399 else if (phba->cfg_sg_seg_cnt <= 114) 3949 /* Feature Level 1 hardware is limited to 2 pages */
3400 phba->cfg_sg_seg_cnt = 114; 3950 if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
3401 else if (phba->cfg_sg_seg_cnt <= 242) 3951 LPFC_SLI_INTF_FEATURELEVEL1_1))
3402 phba->cfg_sg_seg_cnt = 242; 3952 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
3403 else 3953 else
3404 phba->cfg_sg_seg_cnt = 498; 3954 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
3405 3955 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
3406 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) 3956 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
3407 + sizeof(struct fcp_rsp); 3957 dma_buf_size = dma_buf_size << 1)
3408 phba->cfg_sg_dma_buf_size += 3958 ;
3409 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); 3959 if (dma_buf_size == max_buf_size)
3960 phba->cfg_sg_seg_cnt = (dma_buf_size -
3961 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
3962 (2 * sizeof(struct sli4_sge))) /
3963 sizeof(struct sli4_sge);
3964 phba->cfg_sg_dma_buf_size = dma_buf_size;
3410 3965
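The replacement sizing logic above computes the smallest power-of-two DMA buffer that holds the FCP command, the response, and the configured scatter-gather entries, caps it at the feature-level limit, and, when capped, shrinks the segment count to fit. A user-space sketch of the same arithmetic; the structure sizes and limits are assumed stand-ins, not the driver's real values:

/* Sketch of the power-of-two sg_dma_buf sizing above. */
#include <stdio.h>

#define CMND_SZ   32              /* assumed sizeof(struct fcp_cmnd) */
#define RSP_SZ   160              /* assumed sizeof(struct fcp_rsp) */
#define SGE_SZ    16              /* assumed sizeof(struct sli4_sge) */
#define MIN_BUF 1024              /* assumed LPFC_SLI4_MIN_BUF_SIZE */
#define MAX_BUF (64 * 1024)       /* assumed feature-level limit */

int main(void)
{
        int seg_cnt = 64;         /* configured sg segment count */
        int need = CMND_SZ + RSP_SZ + (seg_cnt + 2) * SGE_SZ;
        int dma_buf_size;

        /* Double from the minimum until the requirement fits or we cap. */
        for (dma_buf_size = MIN_BUF;
             dma_buf_size < MAX_BUF && need > dma_buf_size;
             dma_buf_size <<= 1)
                ;
        /* If capped, recompute how many SGEs actually fit. */
        if (dma_buf_size == MAX_BUF)
                seg_cnt = (dma_buf_size - CMND_SZ - RSP_SZ -
                           2 * SGE_SZ) / SGE_SZ;

        printf("need=%d -> dma_buf_size=%d, seg_cnt=%d\n",
               need, dma_buf_size, seg_cnt);
        return 0;
}

Keeping the pool element a power of two is what guarantees the SGL never straddles a 4k page boundary, which is the constraint the comment above states.
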
3411 /* Initialize buffer queue management fields */ 3966 /* Initialize buffer queue management fields */
3412 hbq_count = lpfc_sli_hbq_count(); 3967 hbq_count = lpfc_sli_hbq_count();
@@ -3432,7 +3987,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3432 /* Driver internal slow-path CQ Event pool */ 3987 /* Driver internal slow-path CQ Event pool */
3433 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 3988 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3434 /* Response IOCB work queue list */ 3989 /* Response IOCB work queue list */
3435 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); 3990 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
3436 /* Asynchronous event CQ Event work queue list */ 3991 /* Asynchronous event CQ Event work queue list */
3437 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 3992 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3438 /* Fast-path XRI aborted CQ Event work queue list */ 3993 /* Fast-path XRI aborted CQ Event work queue list */
@@ -3461,6 +4016,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3461 if (unlikely(rc)) 4016 if (unlikely(rc))
3462 goto out_free_bsmbx; 4017 goto out_free_bsmbx;
3463 4018
4019 rc = lpfc_sli4_fw_cfg_check(phba);
4020 if (unlikely(rc))
4021 goto out_free_bsmbx;
4022
3464 /* Set up the hba's configuration parameters. */ 4023 /* Set up the hba's configuration parameters. */
3465 rc = lpfc_sli4_read_config(phba); 4024 rc = lpfc_sli4_read_config(phba);
3466 if (unlikely(rc)) 4025 if (unlikely(rc))
@@ -3502,13 +4061,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3502 goto out_free_active_sgl; 4061 goto out_free_active_sgl;
3503 } 4062 }
3504 4063
4064 /* Allocate eligible FCF bmask memory for FCF round robin failover */
4065 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4066 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4067 GFP_KERNEL);
4068 if (!phba->fcf.fcf_rr_bmask) {
4069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4070 "2759 Failed allocate memory for FCF round "
4071 "robin failover bmask\n");
4072 goto out_remove_rpi_hdrs;
4073 }
4074
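The allocation above sizes the round-robin failover bitmap as ceil(LPFC_SLI4_FCF_TBL_INDX_MAX / BITS_PER_LONG) longs, the same shape the kernel bitmap helpers expect. A sketch of that sizing together with the set/test/clear operations the failover code performs on it; the index limit here is an assumed stand-in:

/* Sketch of the eligible-FCF bitmap sizing and bit operations. */
#include <stdio.h>
#include <stdlib.h>

#define FCF_TBL_INDX_MAX 32       /* assumed table index limit */
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

int main(void)
{
        int longs = (FCF_TBL_INDX_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG;
        unsigned long *bmask = calloc(longs, sizeof(unsigned long));
        int idx = 5;              /* an eligible FCF index */

        if (!bmask)
                return 1;
        /* mark index eligible */
        bmask[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
        printf("index %d eligible: %lu\n", idx,
               (bmask[idx / BITS_PER_LONG] >> (idx % BITS_PER_LONG)) & 1UL);
        /* clear it again, as the FCF DEAD path does for a dead index */
        bmask[idx / BITS_PER_LONG] &= ~(1UL << (idx % BITS_PER_LONG));
        free(bmask);
        return 0;
}
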
3505 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4075 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3506 phba->cfg_fcp_eq_count), GFP_KERNEL); 4076 phba->cfg_fcp_eq_count), GFP_KERNEL);
3507 if (!phba->sli4_hba.fcp_eq_hdl) { 4077 if (!phba->sli4_hba.fcp_eq_hdl) {
3508 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3509 "2572 Failed allocate memory for fast-path " 4079 "2572 Failed allocate memory for fast-path "
3510 "per-EQ handle array\n"); 4080 "per-EQ handle array\n");
3511 goto out_remove_rpi_hdrs; 4081 goto out_free_fcf_rr_bmask;
3512 } 4082 }
3513 4083
3514 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4084 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -3520,10 +4090,49 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3520 goto out_free_fcp_eq_hdl; 4090 goto out_free_fcp_eq_hdl;
3521 } 4091 }
3522 4092
4093 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4094 GFP_KERNEL);
4095 if (!mboxq) {
4096 rc = -ENOMEM;
4097 goto out_free_fcp_eq_hdl;
4098 }
4099
4100 /* Get the Supported Pages. It is always available. */
4101 lpfc_supported_pages(mboxq);
4102 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4103 if (unlikely(rc)) {
4104 rc = -EIO;
4105 mempool_free(mboxq, phba->mbox_mem_pool);
4106 goto out_free_fcp_eq_hdl;
4107 }
4108
4109 mqe = &mboxq->u.mqe;
4110 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4111 LPFC_MAX_SUPPORTED_PAGES);
4112 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4113 switch (pn_page[i]) {
4114 case LPFC_SLI4_PARAMETERS:
4115 phba->sli4_hba.pc_sli4_params.supported = 1;
4116 break;
4117 default:
4118 break;
4119 }
4120 }
4121
4122 /* Read the port's SLI4 Parameters capabilities if supported. */
4123 if (phba->sli4_hba.pc_sli4_params.supported)
4124 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4125 mempool_free(mboxq, phba->mbox_mem_pool);
4126 if (rc) {
4127 rc = -EIO;
4128 goto out_free_fcp_eq_hdl;
4129 }
3523 return rc; 4130 return rc;
3524 4131
3525out_free_fcp_eq_hdl: 4132out_free_fcp_eq_hdl:
3526 kfree(phba->sli4_hba.fcp_eq_hdl); 4133 kfree(phba->sli4_hba.fcp_eq_hdl);
4134out_free_fcf_rr_bmask:
4135 kfree(phba->fcf.fcf_rr_bmask);
3527out_remove_rpi_hdrs: 4136out_remove_rpi_hdrs:
3528 lpfc_sli4_remove_rpi_hdrs(phba); 4137 lpfc_sli4_remove_rpi_hdrs(phba);
3529out_free_active_sgl: 4138out_free_active_sgl:
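The supported-pages handling added in the hunk above boils down to scanning a fixed-size array of page codes returned by the mailbox and remembering whether the SLI4 parameters page appeared. A stand-alone sketch of that scan; the page codes and array contents are made-up stand-ins:

/* Sketch of the supported-pages scan in the hunk above. */
#include <stdio.h>

#define MAX_SUPPORTED_PAGES 8
#define PAGE_SLI4_PARAMETERS 0x01       /* assumed page code */

int main(void)
{
        unsigned char pn_page[MAX_SUPPORTED_PAGES] = { 0x10, 0x01, 0 };
        int supported = 0, i;

        for (i = 0; i < MAX_SUPPORTED_PAGES; i++) {
                switch (pn_page[i]) {
                case PAGE_SLI4_PARAMETERS:
                        supported = 1;  /* fetch the parameters page later */
                        break;
                default:
                        break;          /* unknown pages are ignored */
                }
        }
        printf("SLI4 parameters page %s\n",
               supported ? "supported" : "not supported");
        return 0;
}
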
@@ -3569,6 +4178,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3569 lpfc_sli4_remove_rpi_hdrs(phba); 4178 lpfc_sli4_remove_rpi_hdrs(phba);
3570 lpfc_sli4_remove_rpis(phba); 4179 lpfc_sli4_remove_rpis(phba);
3571 4180
4181 /* Free eligible FCF index bmask */
4182 kfree(phba->fcf.fcf_rr_bmask);
4183
3572 /* Free the ELS sgl list */ 4184 /* Free the ELS sgl list */
3573 lpfc_free_active_sgl(phba); 4185 lpfc_free_active_sgl(phba);
3574 lpfc_free_sgl_list(phba); 4186 lpfc_free_sgl_list(phba);
@@ -3594,8 +4206,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3594 4206
3595 /* Free the current connect table */ 4207 /* Free the current connect table */
3596 list_for_each_entry_safe(conn_entry, next_conn_entry, 4208 list_for_each_entry_safe(conn_entry, next_conn_entry,
3597 &phba->fcf_conn_rec_list, list) 4209 &phba->fcf_conn_rec_list, list) {
4210 list_del_init(&conn_entry->list);
3598 kfree(conn_entry); 4211 kfree(conn_entry);
4212 }
3599 4213
3600 return; 4214 return;
3601} 4215}
@@ -3613,6 +4227,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3613int 4227int
3614lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4228lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3615{ 4229{
4230 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4231 phba->lpfc_hba_down_link = lpfc_hba_down_link;
3616 switch (dev_grp) { 4232 switch (dev_grp) {
3617 case LPFC_PCI_DEV_LP: 4233 case LPFC_PCI_DEV_LP:
3618 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 4234 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@@ -3642,7 +4258,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3642 * device specific resource setup to support the HBA device it attached to. 4258 * device specific resource setup to support the HBA device it attached to.
3643 * 4259 *
3644 * Return codes 4260 * Return codes
3645 * 0 - sucessful 4261 * 0 - successful
3646 * other values - error 4262 * other values - error
3647 **/ 4263 **/
3648static int 4264static int
@@ -3688,7 +4304,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3688 * device specific resource setup to support the HBA device it attached to. 4304 * device specific resource setup to support the HBA device it attached to.
3689 * 4305 *
3690 * Return codes 4306 * Return codes
3691 * 0 - sucessful 4307 * 0 - successful
3692 * other values - error 4308 * other values - error
3693 **/ 4309 **/
3694static int 4310static int
@@ -3753,7 +4369,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
3753 * list and set up the IOCB tag array accordingly. 4369 * list and set up the IOCB tag array accordingly.
3754 * 4370 *
3755 * Return codes 4371 * Return codes
3756 * 0 - sucessful 4372 * 0 - successful
3757 * other values - error 4373 * other values - error
3758 **/ 4374 **/
3759static int 4375static int
@@ -3824,7 +4440,7 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
3824 rc = lpfc_sli4_remove_all_sgl_pages(phba); 4440 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3825 if (rc) { 4441 if (rc) {
3826 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4442 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3827 "2005 Unable to deregister pages from HBA: %x", rc); 4443 "2005 Unable to deregister pages from HBA: %x\n", rc);
3828 } 4444 }
3829 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4445 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3830} 4446}
@@ -3872,7 +4488,7 @@ lpfc_free_active_sgl(struct lpfc_hba *phba)
3872 * list and set up the sgl xritag tag array accordingly. 4488 * list and set up the sgl xritag tag array accordingly.
3873 * 4489 *
3874 * Return codes 4490 * Return codes
3875 * 0 - sucessful 4491 * 0 - successful
3876 * other values - error 4492 * other values - error
3877 **/ 4493 **/
3878static int 4494static int
@@ -3960,6 +4576,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
3960 4576
3961 /* The list order is used by later block SGL registration */ 4577 /* The list order is used by later block SGL registration */
3962 spin_lock_irq(&phba->hbalock); 4578 spin_lock_irq(&phba->hbalock);
4579 sglq_entry->state = SGL_FREED;
3963 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4580 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3964 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4581 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3965 phba->sli4_hba.total_sglq_bufs++; 4582 phba->sli4_hba.total_sglq_bufs++;
@@ -3986,7 +4603,7 @@ out_free_mem:
3986 * enabled and the driver is reinitializing the device. 4603 * enabled and the driver is reinitializing the device.
3987 * 4604 *
3988 * Return codes 4605 * Return codes
3989 * 0 - sucessful 4606 * 0 - successful
3990 * ENOMEM - No available memory 4607 * ENOMEM - No available memory
3991 * EIO - The mailbox failed to complete successfully. 4608 * EIO - The mailbox failed to complete successfully.
3992 **/ 4609 **/
@@ -4146,7 +4763,7 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4146 * PCI device data structure is set. 4763 * PCI device data structure is set.
4147 * 4764 *
4148 * Return codes 4765 * Return codes
4149 * pointer to @phba - sucessful 4766 * pointer to @phba - successful
4150 * NULL - error 4767 * NULL - error
4151 **/ 4768 **/
4152static struct lpfc_hba * 4769static struct lpfc_hba *
@@ -4171,7 +4788,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
4171 return NULL; 4788 return NULL;
4172 } 4789 }
4173 4790
4174 mutex_init(&phba->ct_event_mutex); 4791 spin_lock_init(&phba->ct_ev_lock);
4175 INIT_LIST_HEAD(&phba->ct_ev_waiters); 4792 INIT_LIST_HEAD(&phba->ct_ev_waiters);
4176 4793
4177 return phba; 4794 return phba;
@@ -4202,7 +4819,7 @@ lpfc_hba_free(struct lpfc_hba *phba)
4202 * host with it. 4819 * host with it.
4203 * 4820 *
4204 * Return codes 4821 * Return codes
4205 * 0 - sucessful 4822 * 0 - successful
4206 * other values - error 4823 * other values - error
4207 **/ 4824 **/
4208static int 4825static int
@@ -4273,7 +4890,8 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4273 _dump_buf_data = 4890 _dump_buf_data =
4274 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4891 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4275 if (_dump_buf_data) { 4892 if (_dump_buf_data) {
4276 printk(KERN_ERR "BLKGRD allocated %d pages for " 4893 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4894 "9043 BLKGRD: allocated %d pages for "
4277 "_dump_buf_data at 0x%p\n", 4895 "_dump_buf_data at 0x%p\n",
4278 (1 << pagecnt), _dump_buf_data); 4896 (1 << pagecnt), _dump_buf_data);
4279 _dump_buf_data_order = pagecnt; 4897 _dump_buf_data_order = pagecnt;
@@ -4284,17 +4902,20 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4284 --pagecnt; 4902 --pagecnt;
4285 } 4903 }
4286 if (!_dump_buf_data_order) 4904 if (!_dump_buf_data_order)
4287 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 4905 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4906 "9044 BLKGRD: ERROR unable to allocate "
4288 "memory for hexdump\n"); 4907 "memory for hexdump\n");
4289 } else 4908 } else
4290 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" 4909 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4910 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4291 "\n", _dump_buf_data); 4911 "\n", _dump_buf_data);
4292 if (!_dump_buf_dif) { 4912 if (!_dump_buf_dif) {
4293 while (pagecnt) { 4913 while (pagecnt) {
4294 _dump_buf_dif = 4914 _dump_buf_dif =
4295 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4915 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4296 if (_dump_buf_dif) { 4916 if (_dump_buf_dif) {
4297 printk(KERN_ERR "BLKGRD allocated %d pages for " 4917 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4918 "9046 BLKGRD: allocated %d pages for "
4298 "_dump_buf_dif at 0x%p\n", 4919 "_dump_buf_dif at 0x%p\n",
4299 (1 << pagecnt), _dump_buf_dif); 4920 (1 << pagecnt), _dump_buf_dif);
4300 _dump_buf_dif_order = pagecnt; 4921 _dump_buf_dif_order = pagecnt;
@@ -4305,10 +4926,12 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4305 --pagecnt; 4926 --pagecnt;
4306 } 4927 }
4307 if (!_dump_buf_dif_order) 4928 if (!_dump_buf_dif_order)
4308 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 4929 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4930 "9047 BLKGRD: ERROR unable to allocate "
4309 "memory for hexdump\n"); 4931 "memory for hexdump\n");
4310 } else 4932 } else
4311 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", 4933 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4934 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
4312 _dump_buf_dif); 4935 _dump_buf_dif);
4313} 4936}
4314 4937
@@ -4365,7 +4988,7 @@ lpfc_post_init_setup(struct lpfc_hba *phba)
4365 * with SLI-3 interface spec. 4988 * with SLI-3 interface spec.
4366 * 4989 *
4367 * Return codes 4990 * Return codes
4368 * 0 - sucessful 4991 * 0 - successful
4369 * other values - error 4992 * other values - error
4370 **/ 4993 **/
4371static int 4994static int
@@ -4384,9 +5007,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4384 pdev = phba->pcidev; 5007 pdev = phba->pcidev;
4385 5008
4386 /* Set the device DMA mask size */ 5009 /* Set the device DMA mask size */
4387 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 5010 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
4388 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5011 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5012 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5013 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
4389 return error; 5014 return error;
5015 }
5016 }
4390 5017
4391 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5018 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4392 * required by each mapping. 5019 * required by each mapping.
@@ -4511,8 +5138,7 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4511int 5138int
4512lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5139lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4513{ 5140{
4514 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; 5141 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
4515 uint32_t onlnreg0, onlnreg1;
4516 int i, port_error = -ENODEV; 5142 int i, port_error = -ENODEV;
4517 5143
4518 if (!phba->sli4_hba.STAregaddr) 5144 if (!phba->sli4_hba.STAregaddr)
@@ -4548,29 +5174,35 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4548 bf_get(lpfc_hst_state_port_status, &sta_reg)); 5174 bf_get(lpfc_hst_state_port_status, &sta_reg));
4549 5175
4550 /* Log device information */ 5176 /* Log device information */
4551 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr); 5177 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
4552 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5178 if (bf_get(lpfc_sli_intf_valid,
4553 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " 5179 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
4554 "FeatureL1=0x%x, FeatureL2=0x%x\n", 5180 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4555 bf_get(lpfc_scratchpad_chiptype, &scratchpad), 5181 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4556 bf_get(lpfc_scratchpad_slirev, &scratchpad), 5182 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4557 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), 5183 bf_get(lpfc_sli_intf_sli_family,
4558 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); 5184 &phba->sli4_hba.sli_intf),
4559 5185 bf_get(lpfc_sli_intf_slirev,
5186 &phba->sli4_hba.sli_intf),
5187 bf_get(lpfc_sli_intf_featurelevel1,
5188 &phba->sli4_hba.sli_intf),
5189 bf_get(lpfc_sli_intf_featurelevel2,
5190 &phba->sli4_hba.sli_intf));
5191 }
5192 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5193 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
4560 /* With unrecoverable error, log the error message and return error */ 5194 /* With unrecoverable error, log the error message and return error */
4561 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); 5195 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4562 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); 5196 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4563 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { 5197 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
4564 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); 5198 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
4565 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); 5199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4566 if (uerrlo_reg.word0 || uerrhi_reg.word0) { 5200 "1422 HBA Unrecoverable error: "
4567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5201 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4568 "1422 HBA Unrecoverable error: " 5202 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
4569 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 5203 uerrlo_reg.word0, uerrhi_reg.word0,
4570 "online0_reg=0x%x, online1_reg=0x%x\n", 5204 phba->sli4_hba.ue_mask_lo,
4571 uerrlo_reg.word0, uerrhi_reg.word0, 5205 phba->sli4_hba.ue_mask_hi);
4572 onlnreg0, onlnreg1);
4573 }
4574 return -ENODEV; 5206 return -ENODEV;
4575 } 5207 }
4576 5208
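The rewritten unrecoverable-error test above replaces the ONLINE-register comparison with a masked check: a status bit only counts as fatal when the corresponding ue_mask bit is clear, which is exactly what ~mask & status computes. A small sketch with arbitrary register values:

/* Sketch of the masked unrecoverable-error test above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t uerr_lo = 0x00000404;    /* raw error status (example) */
        uint32_t ue_mask_lo = 0x00000004; /* bit 2 is a known don't-care */

        if (~ue_mask_lo & uerr_lo)
                printf("unrecoverable error: 0x%08x\n",
                       ~ue_mask_lo & uerr_lo);   /* prints 0x00000400 */
        else
                printf("port healthy\n");
        return 0;
}
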
@@ -4591,12 +5223,12 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4591 LPFC_UERR_STATUS_LO; 5223 LPFC_UERR_STATUS_LO;
4592 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5224 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4593 LPFC_UERR_STATUS_HI; 5225 LPFC_UERR_STATUS_HI;
4594 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + 5226 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4595 LPFC_ONLINE0; 5227 LPFC_UE_MASK_LO;
4596 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + 5228 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4597 LPFC_ONLINE1; 5229 LPFC_UE_MASK_HI;
4598 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + 5230 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
4599 LPFC_SCRATCHPAD; 5231 LPFC_SLI_INTF;
4600} 5232}
4601 5233
4602/** 5234/**
@@ -4662,7 +5294,7 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4662 * this routine. 5294 * this routine.
4663 * 5295 *
4664 * Return codes 5296 * Return codes
4665 * 0 - sucessful 5297 * 0 - successful
4666 * ENOMEM - could not allocate memory. 5298 * ENOMEM - could not allocate memory.
4667 **/ 5299 **/
4668static int 5300static int
@@ -4761,7 +5393,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4761 * allocation for the port. 5393 * allocation for the port.
4762 * 5394 *
4763 * Return codes 5395 * Return codes
4764 * 0 - sucessful 5396 * 0 - successful
4765 * ENOMEM - No available memory 5397 * ENOMEM - No available memory
4766 * EIO - The mailbox failed to complete successfully. 5398 * EIO - The mailbox failed to complete successfully.
4767 **/ 5399 **/
@@ -4825,7 +5457,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
4825 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 5457 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4826 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 5458 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4827 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5459 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4828 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; 5460 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5461 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
4829 phba->max_vports = phba->max_vpi; 5462 phba->max_vports = phba->max_vpi;
4830 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5463 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4831 "2003 cfg params XRI(B:%d M:%d), " 5464 "2003 cfg params XRI(B:%d M:%d), "
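The max_vpi change in this hunk turns the VPI count reported by READ_CONFIG into the highest usable zero-based index, guarding against a zero count. Distilled into a sketch:

#include <stdint.h>

/* READ_CONFIG reports how many VPIs the port grants; the driver wants
 * the largest zero-based index, with 0 as the floor when none exist. */
static uint16_t max_vpi_index(uint16_t vpi_count)
{
        return vpi_count > 0 ? vpi_count - 1 : 0;
}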
@@ -4861,7 +5494,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
4861 * HBA consistent with the SLI-4 interface spec. 5494 * HBA consistent with the SLI-4 interface spec.
4862 * 5495 *
4863 * Return codes 5496 * Return codes
4864 * 0 - sucessful 5497 * 0 - successful
4865 * ENOMEM - No available memory 5498 * ENOMEM - No available memory
4866 * EIO - The mailbox failed to complete successfully. 5499 * EIO - The mailbox failed to complete successfully.
4867 **/ 5500 **/
@@ -4910,7 +5543,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
4910 * we just use some constant number as place holder. 5543 * we just use some constant number as place holder.
4911 * 5544 *
4912 * Return codes 5545 * Return codes
4913 * 0 - sucessful 5546 * 0 - successful
4914 * ENOMEM - No available memory 5547 * ENOMEM - No available memory
4915 * EIO - The mailbox failed to complete successfully. 5548 * EIO - The mailbox failed to complete successfully.
4916 **/ 5549 **/
@@ -4979,10 +5612,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
4979 /* It does not make sense to have more EQs than WQs */ 5612 /* It does not make sense to have more EQs than WQs */
4980 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 5613 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4981 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5614 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4982 "2593 The number of FCP EQs (%d) is more " 5615 "2593 The FCP EQ count(%d) cannot be greater "
4983 "than the number of FCP WQs (%d), take " 5616 "than the FCP WQ count(%d), limiting the "
4984 "the number of FCP EQs same as than of " 5617 "FCP EQ count to %d\n", cfg_fcp_eq_count,
4985 "WQs (%d)\n", cfg_fcp_eq_count,
4986 phba->cfg_fcp_wq_count, 5618 phba->cfg_fcp_wq_count,
4987 phba->cfg_fcp_wq_count); 5619 phba->cfg_fcp_wq_count);
4988 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 5620 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
@@ -5058,15 +5690,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
5058 } 5690 }
5059 phba->sli4_hba.els_cq = qdesc; 5691 phba->sli4_hba.els_cq = qdesc;
5060 5692
5061 /* Create slow-path Unsolicited Receive Complete Queue */
5062 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5063 phba->sli4_hba.cq_ecount);
5064 if (!qdesc) {
5065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5066 "0502 Failed allocate slow-path USOL RX CQ\n");
5067 goto out_free_els_cq;
5068 }
5069 phba->sli4_hba.rxq_cq = qdesc;
5070 5693
5071 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5694 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5072 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5695 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
@@ -5075,7 +5698,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
5075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5076 "2577 Failed allocate memory for fast-path " 5699 "2577 Failed allocate memory for fast-path "
5077 "CQ record array\n"); 5700 "CQ record array\n");
5078 goto out_free_rxq_cq; 5701 goto out_free_els_cq;
5079 } 5702 }
5080 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5703 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5081 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5704 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
@@ -5188,9 +5811,6 @@ out_free_fcp_cq:
5188 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 5811 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5189 } 5812 }
5190 kfree(phba->sli4_hba.fcp_cq); 5813 kfree(phba->sli4_hba.fcp_cq);
5191out_free_rxq_cq:
5192 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5193 phba->sli4_hba.rxq_cq = NULL;
5194out_free_els_cq: 5814out_free_els_cq:
5195 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5815 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5196 phba->sli4_hba.els_cq = NULL; 5816 phba->sli4_hba.els_cq = NULL;
@@ -5218,7 +5838,7 @@ out_error:
5218 * operation. 5838 * operation.
5219 * 5839 *
5220 * Return codes 5840 * Return codes
5221 * 0 - sucessful 5841 * 0 - successful
5222 * ENOMEM - No available memory 5842 * ENOMEM - No available memory
5223 * EIO - The mailbox failed to complete successfully. 5843 * EIO - The mailbox failed to complete successfully.
5224 **/ 5844 **/
@@ -5247,10 +5867,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5247 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 5867 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5248 phba->sli4_hba.dat_rq = NULL; 5868 phba->sli4_hba.dat_rq = NULL;
5249 5869
5250 /* Release unsolicited receive complete queue */
5251 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5252 phba->sli4_hba.rxq_cq = NULL;
5253
5254 /* Release ELS complete queue */ 5870 /* Release ELS complete queue */
5255 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5871 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5256 phba->sli4_hba.els_cq = NULL; 5872 phba->sli4_hba.els_cq = NULL;
@@ -5286,7 +5902,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5286 * operation. 5902 * operation.
5287 * 5903 *
5288 * Return codes 5904 * Return codes
5289 * 0 - sucessful 5905 * 0 - successful
5290 * ENOMEM - No available memory 5906 * ENOMEM - No available memory
5291 * EIO - The mailbox failed to complete successfully. 5907 * EIO - The mailbox failed to complete successfully.
5292 **/ 5908 **/
@@ -5383,25 +5999,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5383 phba->sli4_hba.els_cq->queue_id, 5999 phba->sli4_hba.els_cq->queue_id,
5384 phba->sli4_hba.sp_eq->queue_id); 6000 phba->sli4_hba.sp_eq->queue_id);
5385 6001
5386 /* Set up slow-path Unsolicited Receive Complete Queue */
5387 if (!phba->sli4_hba.rxq_cq) {
5388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5389 "0532 USOL RX CQ not allocated\n");
5390 goto out_destroy_els_cq;
5391 }
5392 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5393 LPFC_RCQ, LPFC_USOL);
5394 if (rc) {
5395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5396 "0533 Failed setup of slow-path USOL RX CQ: "
5397 "rc = 0x%x\n", rc);
5398 goto out_destroy_els_cq;
5399 }
5400 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5401 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5402 phba->sli4_hba.rxq_cq->queue_id,
5403 phba->sli4_hba.sp_eq->queue_id);
5404
5405 /* Set up fast-path FCP Response Complete Queue */ 6002 /* Set up fast-path FCP Response Complete Queue */
5406 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6003 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5407 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6004 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -5507,7 +6104,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5507 goto out_destroy_fcp_wq; 6104 goto out_destroy_fcp_wq;
5508 } 6105 }
5509 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6106 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5510 phba->sli4_hba.rxq_cq, LPFC_USOL); 6107 phba->sli4_hba.els_cq, LPFC_USOL);
5511 if (rc) { 6108 if (rc) {
5512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5513 "0541 Failed setup of Receive Queue: " 6110 "0541 Failed setup of Receive Queue: "
@@ -5519,7 +6116,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5519 "parent cq-id=%d\n", 6116 "parent cq-id=%d\n",
5520 phba->sli4_hba.hdr_rq->queue_id, 6117 phba->sli4_hba.hdr_rq->queue_id,
5521 phba->sli4_hba.dat_rq->queue_id, 6118 phba->sli4_hba.dat_rq->queue_id,
5522 phba->sli4_hba.rxq_cq->queue_id); 6119 phba->sli4_hba.els_cq->queue_id);
5523 return 0; 6120 return 0;
5524 6121
5525out_destroy_fcp_wq: 6122out_destroy_fcp_wq:
@@ -5531,8 +6128,6 @@ out_destroy_mbx_wq:
5531out_destroy_fcp_cq: 6128out_destroy_fcp_cq:
5532 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6129 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5533 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6130 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5534 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5535out_destroy_els_cq:
5536 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6131 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5537out_destroy_mbx_cq: 6132out_destroy_mbx_cq:
5538 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6133 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -5552,7 +6147,7 @@ out_error:
5552 * operation. 6147 * operation.
5553 * 6148 *
5554 * Return codes 6149 * Return codes
5555 * 0 - sucessful 6150 * 0 - successful
5556 * ENOMEM - No availble memory 6151 * ENOMEM - No availble memory
5557 * EIO - The mailbox failed to complete successfully. 6152 * EIO - The mailbox failed to complete successfully.
5558 **/ 6153 **/
@@ -5574,8 +6169,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5574 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6169 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5575 /* Unset ELS complete queue */ 6170 /* Unset ELS complete queue */
5576 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6171 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5577 /* Unset unsolicited receive complete queue */
5578 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5579 /* Unset FCP response complete queue */ 6172 /* Unset FCP response complete queue */
5580 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6173 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5581 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6174 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
@@ -5599,7 +6192,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5599 * Later, this can be used for all the slow-path events. 6192 * Later, this can be used for all the slow-path events.
5600 * 6193 *
5601 * Return codes 6194 * Return codes
5602 * 0 - sucessful 6195 * 0 - successful
5603 * -ENOMEM - No available memory 6196 * -ENOMEM - No available memory
5604 **/ 6197 **/
5605static int 6198static int
@@ -5760,7 +6353,7 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5760 * all resources assigned to the PCI function which originates this request. 6353 * all resources assigned to the PCI function which originates this request.
5761 * 6354 *
5762 * Return codes 6355 * Return codes
5763 * 0 - sucessful 6356 * 0 - successful
5764 * ENOMEM - No available memory 6357 * ENOMEM - No available memory
5765 * EIO - The mailbox failed to complete successfully. 6358 * EIO - The mailbox failed to complete successfully.
5766 **/ 6359 **/
@@ -5910,7 +6503,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5910 spin_lock_irqsave(&phba->hbalock, flags); 6503 spin_lock_irqsave(&phba->hbalock, flags);
5911 /* Mark the FCFI is no longer registered */ 6504 /* Mark the FCFI is no longer registered */
5912 phba->fcf.fcf_flag &= 6505 phba->fcf.fcf_flag &=
5913 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); 6506 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
5914 spin_unlock_irqrestore(&phba->hbalock, flags); 6507 spin_unlock_irqrestore(&phba->hbalock, flags);
5915 } 6508 }
5916} 6509}
@@ -5923,7 +6516,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5923 * with SLI-4 interface spec. 6516 * with SLI-4 interface spec.
5924 * 6517 *
5925 * Return codes 6518 * Return codes
5926 * 0 - sucessful 6519 * 0 - successful
5927 * other values - error 6520 * other values - error
5928 **/ 6521 **/
5929static int 6522static int
@@ -5940,22 +6533,30 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5940 pdev = phba->pcidev; 6533 pdev = phba->pcidev;
5941 6534
5942 /* Set the device DMA mask size */ 6535 /* Set the device DMA mask size */
5943 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 6536 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5944 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 6537 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6538 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6539 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5945 return error; 6540 return error;
6541 }
6542 }
5946 6543
5947 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6544 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5948 * number of bytes required by each mapping. They are actually 6545 * number of bytes required by each mapping. They are actually
5949 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. 6546 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
5950 */ 6547 */
5951 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); 6548 if (pci_resource_start(pdev, 0)) {
5952 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); 6549 phba->pci_bar0_map = pci_resource_start(pdev, 0);
5953 6550 bar0map_len = pci_resource_len(pdev, 0);
5954 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); 6551 } else {
5955 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); 6552 phba->pci_bar0_map = pci_resource_start(pdev, 1);
6553 bar0map_len = pci_resource_len(pdev, 1);
6554 }
6555 phba->pci_bar1_map = pci_resource_start(pdev, 2);
6556 bar1map_len = pci_resource_len(pdev, 2);
5956 6557
5957 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); 6558 phba->pci_bar2_map = pci_resource_start(pdev, 4);
5958 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); 6559 bar2map_len = pci_resource_len(pdev, 4);
5959 6560
5960 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 6561 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5961 phba->sli4_hba.conf_regs_memmap_p = 6562 phba->sli4_hba.conf_regs_memmap_p =
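The DMA setup above now configures the streaming and coherent masks as a pair and falls back from 64-bit to 32-bit addressing. On current kernels the same policy is usually written with dma_set_mask_and_coherent(); a sketch under that assumption:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int set_dma_masks(struct pci_dev *pdev)
{
        /* Prefer full 64-bit DMA for both streaming and coherent
         * mappings, then retry with 32-bit before failing the probe. */
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                return 0;
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}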
@@ -6052,7 +6653,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6052 * will be left with MSI-X enabled and leaks its vectors. 6653 * will be left with MSI-X enabled and leaks its vectors.
6053 * 6654 *
6054 * Return codes 6655 * Return codes
6055 * 0 - sucessful 6656 * 0 - successful
6056 * other values - error 6657 * other values - error
6057 **/ 6658 **/
6058static int 6659static int
@@ -6184,7 +6785,7 @@ lpfc_sli_disable_msix(struct lpfc_hba *phba)
6184 * is done in this function. 6785 * is done in this function.
6185 * 6786 *
6186 * Return codes 6787 * Return codes
6187 * 0 - sucessful 6788 * 0 - successful
6188 * other values - error 6789 * other values - error
6189 */ 6790 */
6190static int 6791static int
@@ -6243,7 +6844,7 @@ lpfc_sli_disable_msi(struct lpfc_hba *phba)
6243 * MSI-X -> MSI -> IRQ. 6844 * MSI-X -> MSI -> IRQ.
6244 * 6845 *
6245 * Return codes 6846 * Return codes
6246 * 0 - sucessful 6847 * 0 - successful
6247 * other values - error 6848 * other values - error
6248 **/ 6849 **/
6249static uint32_t 6850static uint32_t
@@ -6333,7 +6934,7 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
6333 * enabled and leaks its vectors. 6934 * enabled and leaks its vectors.
6334 * 6935 *
6335 * Return codes 6936 * Return codes
6336 * 0 - sucessful 6937 * 0 - successful
6337 * other values - error 6938 * other values - error
6338 **/ 6939 **/
6339static int 6940static int
@@ -6443,7 +7044,7 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6443 * which is done in this function. 7044 * which is done in this function.
6444 * 7045 *
6445 * Return codes 7046 * Return codes
6446 * 0 - sucessful 7047 * 0 - successful
6447 * other values - error 7048 * other values - error
6448 **/ 7049 **/
6449static int 7050static int
@@ -6508,7 +7109,7 @@ lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6508 * MSI-X -> MSI -> IRQ. 7109 * MSI-X -> MSI -> IRQ.
6509 * 7110 *
6510 * Return codes 7111 * Return codes
6511 * 0 - sucessful 7112 * 0 - successful
6512 * other values - error 7113 * other values - error
6513 **/ 7114 **/
6514static uint32_t 7115static uint32_t
@@ -6700,6 +7301,73 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6700 phba->pport->work_port_events = 0; 7301 phba->pport->work_port_events = 0;
6701} 7302}
6702 7303
7304 /**
7305 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7306 * @phba: Pointer to HBA context object.
7307 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7308 *
7309 * This function is called in the SLI4 code path to read the port's
7310 * sli4 capabilities.
7311 *
7312 * This function may be called from any context that can block-wait
7313 * for the completion. The expectation is that this routine is called
7314 * typically from probe_one or from the online routine.
7315 **/
7316int
7317lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7318{
7319 int rc;
7320 struct lpfc_mqe *mqe;
7321 struct lpfc_pc_sli4_params *sli4_params;
7322 uint32_t mbox_tmo;
7323
7324 rc = 0;
7325 mqe = &mboxq->u.mqe;
7326
7327 /* Read the port's SLI4 Parameters port capabilities */
7328 lpfc_sli4_params(mboxq);
7329 if (!phba->sli4_hba.intr_enable)
7330 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7331 else {
7332 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7333 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7334 }
7335
7336 if (unlikely(rc))
7337 return 1;
7338
7339 sli4_params = &phba->sli4_hba.pc_sli4_params;
7340 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7341 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7342 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7343 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7344 &mqe->un.sli4_params);
7345 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7346 &mqe->un.sli4_params);
7347 sli4_params->proto_types = mqe->un.sli4_params.word3;
7348 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7349 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7350 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7351 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7352 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7353 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7354 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7355 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7356 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7357 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7358 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7359 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7360 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7361 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7362 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7363 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7364 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7365 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7366 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7367 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7368 return rc;
7369}
7370
6703/** 7371/**
6704 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 7372 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6705 * @pdev: pointer to PCI device 7373 * @pdev: pointer to PCI device
@@ -6722,6 +7390,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6722{ 7390{
6723 struct lpfc_hba *phba; 7391 struct lpfc_hba *phba;
6724 struct lpfc_vport *vport = NULL; 7392 struct lpfc_vport *vport = NULL;
7393 struct Scsi_Host *shost = NULL;
6725 int error; 7394 int error;
6726 uint32_t cfg_mode, intr_mode; 7395 uint32_t cfg_mode, intr_mode;
6727 7396
@@ -6800,6 +7469,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6800 goto out_destroy_shost; 7469 goto out_destroy_shost;
6801 } 7470 }
6802 7471
7472 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
6803 /* Now, trying to enable interrupt and bring up the device */ 7473 /* Now, trying to enable interrupt and bring up the device */
6804 cfg_mode = phba->cfg_use_msi; 7474 cfg_mode = phba->cfg_use_msi;
6805 while (true) { 7475 while (true) {
@@ -6866,6 +7536,8 @@ out_unset_pci_mem_s3:
6866 lpfc_sli_pci_mem_unset(phba); 7536 lpfc_sli_pci_mem_unset(phba);
6867out_disable_pci_dev: 7537out_disable_pci_dev:
6868 lpfc_disable_pci_dev(phba); 7538 lpfc_disable_pci_dev(phba);
7539 if (shost)
7540 scsi_host_put(shost);
6869out_free_phba: 7541out_free_phba:
6870 lpfc_hba_free(phba); 7542 lpfc_hba_free(phba);
6871 return error; 7543 return error;
@@ -7036,6 +7708,13 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7036 /* Restore device state from PCI config space */ 7708 /* Restore device state from PCI config space */
7037 pci_set_power_state(pdev, PCI_D0); 7709 pci_set_power_state(pdev, PCI_D0);
7038 pci_restore_state(pdev); 7710 pci_restore_state(pdev);
7711
7712 /*
7713 * As the new kernel behavior of pci_restore_state() API call clears
7714 * device saved_state flag, need to save the restored state again.
7715 */
7716 pci_save_state(pdev);
7717
7039 if (pdev->is_busmaster) 7718 if (pdev->is_busmaster)
7040 pci_set_master(pdev); 7719 pci_set_master(pdev);
7041 7720
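Every resume and slot-reset path touched by this diff re-saves PCI state right after restoring it, because pci_restore_state() clears the device's saved_state flag. The shape of such a resume handler, as a sketch rather than the driver's full routine:

#include <linux/pci.h>

static int resume_one_sketch(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        /* pci_restore_state() consumes the saved state, so save it
         * again for any later suspend or error-recovery restore. */
        pci_save_state(pdev);
        if (pdev->is_busmaster)
                pci_set_master(pdev);
        return 0;
}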
@@ -7070,6 +7749,73 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7070} 7749}
7071 7750
7072/** 7751/**
7752 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7753 * @phba: pointer to lpfc hba data structure.
7754 *
7755 * This routine is called to prepare the SLI3 device for PCI slot recover. It
7756 * aborts and stops all the on-going I/Os on the pci device.
7757 **/
7758static void
7759lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7760{
7761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7762 "2723 PCI channel I/O abort preparing for recovery\n");
7763 /* Prepare for bringing HBA offline */
7764 lpfc_offline_prep(phba);
7765 /* Clear sli active flag to prevent sysfs access to HBA */
7766 spin_lock_irq(&phba->hbalock);
7767 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
7768 spin_unlock_irq(&phba->hbalock);
7769 /* Stop and flush all I/Os and bring HBA offline */
7770 lpfc_offline(phba);
7771}
7772
7773/**
7774 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7775 * @phba: pointer to lpfc hba data structure.
7776 *
7777 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7778 * disables the device interrupt and pci device, and aborts the internal FCP
7779 * pending I/Os.
7780 **/
7781static void
7782lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7783{
7784 struct lpfc_sli *psli = &phba->sli;
7785 struct lpfc_sli_ring *pring;
7786
7787 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7788 "2710 PCI channel disable preparing for reset\n");
7789 /* Disable interrupt and pci device */
7790 lpfc_sli_disable_intr(phba);
7791 pci_disable_device(phba->pcidev);
7792 /*
7793 * There may be I/Os dropped by the firmware.
7794 * Error the iocbs (I/Os) on the txcmplq and let the SCSI layer
7795 * retry them after re-establishing the link.
7796 */
7797 pring = &psli->ring[psli->fcp_ring];
7798 lpfc_sli_abort_iocb_ring(phba, pring);
7799}
7800
7801/**
7802 * lpfc_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7803 * @phba: pointer to lpfc hba data structure.
7804 *
7805 * This routine is called to prepare the SLI3 device for permanently disabling
7806 * the PCI slot. It blocks the SCSI transport layer traffic and flushes the FCP
7807 * pending I/Os.
7808 **/
7809static void
7810lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7811{
7812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7813 "2711 PCI channel permanent disable for failure\n");
7814 /* Clean up all driver's outstanding SCSI I/Os */
7815 lpfc_sli_flush_fcp_rings(phba);
7816}
7817
7818/**
7073 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7819 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7074 * @pdev: pointer to PCI device. 7820 * @pdev: pointer to PCI device.
7075 * @state: the current PCI connection state. 7821 * @state: the current PCI connection state.
@@ -7083,6 +7829,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7083 * as desired. 7829 * as desired.
7084 * 7830 *
7085 * Return codes 7831 * Return codes
7832 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7086 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7833 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7087 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7834 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7088 **/ 7835 **/
@@ -7091,33 +7838,30 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7091{ 7838{
7092 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7839 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7093 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7840 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7094 struct lpfc_sli *psli = &phba->sli;
7095 struct lpfc_sli_ring *pring;
7096 7841
7097 if (state == pci_channel_io_perm_failure) { 7842 /* Block all SCSI devices' I/Os on the host */
7098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7843 lpfc_scsi_dev_block(phba);
7099 "0472 PCI channel I/O permanent failure\n"); 7844
7100 /* Block all SCSI devices' I/Os on the host */ 7845 switch (state) {
7101 lpfc_scsi_dev_block(phba); 7846 case pci_channel_io_normal:
7102 /* Clean up all driver's outstanding SCSI I/Os */ 7847 /* Non-fatal error, prepare for recovery */
7103 lpfc_sli_flush_fcp_rings(phba); 7848 lpfc_sli_prep_dev_for_recover(phba);
7849 return PCI_ERS_RESULT_CAN_RECOVER;
7850 case pci_channel_io_frozen:
7851 /* Fatal error, prepare for slot reset */
7852 lpfc_sli_prep_dev_for_reset(phba);
7853 return PCI_ERS_RESULT_NEED_RESET;
7854 case pci_channel_io_perm_failure:
7855 /* Permanent failure, prepare for device down */
7856 lpfc_prep_dev_for_perm_failure(phba);
7104 return PCI_ERS_RESULT_DISCONNECT; 7857 return PCI_ERS_RESULT_DISCONNECT;
7858 default:
7859 /* Unknown state, prepare and request slot reset */
7860 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7861 "0472 Unknown PCI error state: x%x\n", state);
7862 lpfc_sli_prep_dev_for_reset(phba);
7863 return PCI_ERS_RESULT_NEED_RESET;
7105 } 7864 }
7106
7107 pci_disable_device(pdev);
7108 /*
7109 * There may be I/Os dropped by the firmware.
7110 * Error iocb (I/O) on txcmplq and let the SCSI layer
7111 * retry it after re-establishing link.
7112 */
7113 pring = &psli->ring[psli->fcp_ring];
7114 lpfc_sli_abort_iocb_ring(phba, pring);
7115
7116 /* Disable interrupt */
7117 lpfc_sli_disable_intr(phba);
7118
7119 /* Request a slot reset. */
7120 return PCI_ERS_RESULT_NEED_RESET;
7121} 7865}
7122 7866
7123/** 7867/**
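The rewritten error_detected handler above gives each pci_channel_state_t its own preparation step and recovery verdict. The general shape of such a callback, with the lpfc prep helpers elided (sketch):

#include <linux/pci.h>

static pci_ers_result_t io_error_detected_sketch(struct pci_dev *pdev,
                                                 pci_channel_state_t state)
{
        switch (state) {
        case pci_channel_io_normal:       /* non-fatal; link still usable */
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:       /* fatal; MMIO and DMA blocked */
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure: /* slot is gone for good */
                return PCI_ERS_RESULT_DISCONNECT;
        default:                          /* unknown; ask for a reset */
                return PCI_ERS_RESULT_NEED_RESET;
        }
}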
@@ -7154,6 +7898,13 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7154 } 7898 }
7155 7899
7156 pci_restore_state(pdev); 7900 pci_restore_state(pdev);
7901
7902 /*
7903 * As the new kernel behavior of pci_restore_state() API call clears
7904 * device saved_state flag, need to save the restored state again.
7905 */
7906 pci_save_state(pdev);
7907
7157 if (pdev->is_busmaster) 7908 if (pdev->is_busmaster)
7158 pci_set_master(pdev); 7909 pci_set_master(pdev);
7159 7910
@@ -7197,7 +7948,12 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
7197 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7948 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7198 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7949 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7199 7950
7951 /* Bring the device online */
7200 lpfc_online(phba); 7952 lpfc_online(phba);
7953
7954 /* Clean up Advanced Error Reporting (AER) if needed */
7955 if (phba->hba_flag & HBA_AER_ENABLED)
7956 pci_cleanup_aer_uncorrect_error_status(pdev);
7201} 7957}
7202 7958
7203/** 7959/**
@@ -7213,15 +7969,15 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7213 7969
7214 if (phba->sli_rev == LPFC_SLI_REV4) { 7970 if (phba->sli_rev == LPFC_SLI_REV4) {
7215 if (max_xri <= 100) 7971 if (max_xri <= 100)
7216 return 4; 7972 return 10;
7217 else if (max_xri <= 256) 7973 else if (max_xri <= 256)
7218 return 8; 7974 return 25;
7219 else if (max_xri <= 512) 7975 else if (max_xri <= 512)
7220 return 16; 7976 return 50;
7221 else if (max_xri <= 1024) 7977 else if (max_xri <= 1024)
7222 return 32; 7978 return 100;
7223 else 7979 else
7224 return 48; 7980 return 150;
7225 } else 7981 } else
7226 return 0; 7982 return 0;
7227} 7983}
@@ -7249,6 +8005,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7249{ 8005{
7250 struct lpfc_hba *phba; 8006 struct lpfc_hba *phba;
7251 struct lpfc_vport *vport = NULL; 8007 struct lpfc_vport *vport = NULL;
8008 struct Scsi_Host *shost = NULL;
7252 int error; 8009 int error;
7253 uint32_t cfg_mode, intr_mode; 8010 uint32_t cfg_mode, intr_mode;
7254 int mcnt; 8011 int mcnt;
@@ -7329,6 +8086,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7329 goto out_destroy_shost; 8086 goto out_destroy_shost;
7330 } 8087 }
7331 8088
8089 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7332 /* Now, trying to enable interrupt and bring up the device */ 8090 /* Now, trying to enable interrupt and bring up the device */
7333 cfg_mode = phba->cfg_use_msi; 8091 cfg_mode = phba->cfg_use_msi;
7334 while (true) { 8092 while (true) {
@@ -7342,6 +8100,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7342 error = -ENODEV; 8100 error = -ENODEV;
7343 goto out_free_sysfs_attr; 8101 goto out_free_sysfs_attr;
7344 } 8102 }
8103 /* Default to single FCP EQ for non-MSI-X */
8104 if (phba->intr_type != MSIX)
8105 phba->cfg_fcp_eq_count = 1;
7345 /* Set up SLI-4 HBA */ 8106 /* Set up SLI-4 HBA */
7346 if (lpfc_sli4_hba_setup(phba)) { 8107 if (lpfc_sli4_hba_setup(phba)) {
7347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8108 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7397,6 +8158,8 @@ out_unset_pci_mem_s4:
7397 lpfc_sli4_pci_mem_unset(phba); 8158 lpfc_sli4_pci_mem_unset(phba);
7398out_disable_pci_dev: 8159out_disable_pci_dev:
7399 lpfc_disable_pci_dev(phba); 8160 lpfc_disable_pci_dev(phba);
8161 if (shost)
8162 scsi_host_put(shost);
7400out_free_phba: 8163out_free_phba:
7401 lpfc_hba_free(phba); 8164 lpfc_hba_free(phba);
7402 return error; 8165 return error;
@@ -7551,6 +8314,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7551 /* Restore device state from PCI config space */ 8314 /* Restore device state from PCI config space */
7552 pci_set_power_state(pdev, PCI_D0); 8315 pci_set_power_state(pdev, PCI_D0);
7553 pci_restore_state(pdev); 8316 pci_restore_state(pdev);
8317
8318 /*
8319 * As the new kernel behavior of pci_restore_state() API call clears
8320 * device saved_state flag, need to save the restored state again.
8321 */
8322 pci_save_state(pdev);
8323
7554 if (pdev->is_busmaster) 8324 if (pdev->is_busmaster)
7555 pci_set_master(pdev); 8325 pci_set_master(pdev);
7556 8326
@@ -7670,11 +8440,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7670 int rc; 8440 int rc;
7671 struct lpfc_sli_intf intf; 8441 struct lpfc_sli_intf intf;
7672 8442
7673 if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0)) 8443 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
7674 return -ENODEV; 8444 return -ENODEV;
7675 8445
7676 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 8446 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
7677 (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4)) 8447 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
7678 rc = lpfc_pci_probe_one_s4(pdev, pid); 8448 rc = lpfc_pci_probe_one_s4(pdev, pid);
7679 else 8449 else
7680 rc = lpfc_pci_probe_one_s3(pdev, pid); 8450 rc = lpfc_pci_probe_one_s3(pdev, pid);
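Family dispatch now reads the SLI_INTF register straight from PCI config space and branches on its validity and SLI-revision fields. In outline (sketch; sli_intf_off, sli_intf_is_sli4() and the probe helpers are hypothetical stand-ins for LPFC_SLI_INTF and the bf_get() accessors):

#include <linux/pci.h>
#include <linux/types.h>

/* Hypothetical stand-ins, declared extern so the sketch compiles. */
extern unsigned int sli_intf_off;
extern bool sli_intf_is_sli4(u32 intf_word);
extern int probe_one_s3(struct pci_dev *, const struct pci_device_id *);
extern int probe_one_s4(struct pci_dev *, const struct pci_device_id *);

static int probe_one_sketch(struct pci_dev *pdev,
                            const struct pci_device_id *pid)
{
        u32 intf;

        /* One config-space read decides which SLI generation owns this
         * function; a failed read means there is no usable device. */
        if (pci_read_config_dword(pdev, sli_intf_off, &intf))
                return -ENODEV;
        return sli_intf_is_sli4(intf) ? probe_one_s4(pdev, pid)
                                      : probe_one_s3(pdev, pid);
}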
@@ -7971,6 +8741,10 @@ static struct pci_device_id lpfc_id_table[] = {
7971 PCI_ANY_ID, PCI_ANY_ID, }, 8741 PCI_ANY_ID, PCI_ANY_ID, },
7972 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 8742 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7973 PCI_ANY_ID, PCI_ANY_ID, }, 8743 PCI_ANY_ID, PCI_ANY_ID, },
8744 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
8745 PCI_ANY_ID, PCI_ANY_ID, },
8746 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
8747 PCI_ANY_ID, PCI_ANY_ID, },
7974 { 0 } 8748 { 0 }
7975}; 8749};
7976 8750
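The Tomcat and Falcon entries follow the table's existing wildcard form: match on vendor and device, accept any subsystem IDs, and keep the zero terminator last so the PCI core knows where the table ends. In general (sketch reusing the IDs above, which lpfc defines in its own headers):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id ids_sketch[] = {
        {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
         PCI_ANY_ID, PCI_ANY_ID, },
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
         PCI_ANY_ID, PCI_ANY_ID, },
        { 0 }   /* terminator: the PCI core stops scanning here */
};
MODULE_DEVICE_TABLE(pci, ids_sketch);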
@@ -8053,15 +8827,15 @@ lpfc_exit(void)
8053 if (lpfc_enable_npiv) 8827 if (lpfc_enable_npiv)
8054 fc_release_transport(lpfc_vport_transport_template); 8828 fc_release_transport(lpfc_vport_transport_template);
8055 if (_dump_buf_data) { 8829 if (_dump_buf_data) {
8056 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " 8830 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
8057 "at 0x%p\n", 8831 "_dump_buf_data at 0x%p\n",
8058 (1L << _dump_buf_data_order), _dump_buf_data); 8832 (1L << _dump_buf_data_order), _dump_buf_data);
8059 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 8833 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8060 } 8834 }
8061 8835
8062 if (_dump_buf_dif) { 8836 if (_dump_buf_dif) {
8063 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " 8837 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
8064 "at 0x%p\n", 8838 "_dump_buf_dif at 0x%p\n",
8065 (1L << _dump_buf_dif_order), _dump_buf_dif); 8839 (1L << _dump_buf_dif_order), _dump_buf_dif);
8066 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 8840 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8067 } 8841 }
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 954ba57970a3..bb59e9273126 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -35,6 +35,7 @@
35#define LOG_VPORT 0x00004000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOG_SECURITY 0x00008000 /* Security events */ 36#define LOG_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_FIP 0x00020000 /* FIP events */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 39#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
39 40
40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 41#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 1ab405902a18..72e6adb0643e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -21,12 +21,13 @@
21 21
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
26#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
27#include <scsi/scsi_transport_fc.h> 28#include <scsi/scsi_transport_fc.h>
28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30#include <scsi/fc/fc_fs.h>
30 31
31#include "lpfc_hw4.h" 32#include "lpfc_hw4.h"
32#include "lpfc_hw.h" 33#include "lpfc_hw.h"
@@ -820,6 +821,10 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
820 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; 821 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
821 mb->un.varRegVpi.sid = vport->fc_myDID; 822 mb->un.varRegVpi.sid = vport->fc_myDID;
822 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; 823 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
824 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
825 sizeof(struct lpfc_name));
826 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
827 mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
823 828
824 mb->mbxCommand = MBX_REG_VPI; 829 mb->mbxCommand = MBX_REG_VPI;
825 mb->mbxOwner = OWN_HOST; 830 mb->mbxOwner = OWN_HOST;
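REG_VPI above (and REG_VFI further down) now carry the port's WWN in the mailbox, with each 32-bit word converted to little-endian before the command is issued. The staging pattern, distilled (sketch):

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Stage an 8-byte WWN into two mailbox words, byte-swapping each
 * word into the little-endian layout the hardware expects. */
static void stage_wwn(u32 dest[2], const u8 wwn[8])
{
        memcpy(dest, wwn, 8);
        dest[0] = cpu_to_le32(dest[0]);
        dest[1] = cpu_to_le32(dest[1]);
}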
@@ -849,7 +854,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
849 MAILBOX_t *mb = &pmb->u.mb; 854 MAILBOX_t *mb = &pmb->u.mb;
850 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 855 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
851 856
852 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; 857 if (phba->sli_rev < LPFC_SLI_REV4)
858 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
859 else
860 mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
853 861
854 mb->mbxCommand = MBX_UNREG_VPI; 862 mb->mbxCommand = MBX_UNREG_VPI;
855 mb->mbxOwner = OWN_HOST; 863 mb->mbxOwner = OWN_HOST;
@@ -1132,7 +1140,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1132 /* Otherwise we setup specific rctl / type masks for this ring */ 1140 /* Otherwise we setup specific rctl / type masks for this ring */
1133 for (i = 0; i < pring->num_mask; i++) { 1141 for (i = 0; i < pring->num_mask; i++) {
1134 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; 1142 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
1135 if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ) 1143 if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
1136 mb->un.varCfgRing.rrRegs[i].rmask = 0xff; 1144 mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
1137 else 1145 else
1138 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; 1146 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
@@ -1654,9 +1662,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1654 /* Allocate record for keeping SGE virtual addresses */ 1662 /* Allocate record for keeping SGE virtual addresses */
1655 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), 1663 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1656 GFP_KERNEL); 1664 GFP_KERNEL);
1657 if (!mbox->sge_array) 1665 if (!mbox->sge_array) {
1666 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1667 "2527 Failed to allocate non-embedded SGE "
1668 "array.\n");
1658 return 0; 1669 return 0;
1659 1670 }
1660 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { 1671 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1661 /* The DMA memory is always allocated in the length of a 1672 /* The DMA memory is always allocated in the length of a
1662 * page even though the last SGE might not fill up to a 1673 * page even though the last SGE might not fill up to a
@@ -1697,7 +1708,8 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1697 alloc_len - sizeof(union lpfc_sli4_cfg_shdr); 1708 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1698 } 1709 }
1699 /* The sub-header is in DMA memory, which needs endian conversion */ 1710 /* The sub-header is in DMA memory, which needs endian conversion */
1700 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, 1711 if (cfg_shdr)
1712 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1701 sizeof(union lpfc_sli4_cfg_shdr)); 1713 sizeof(union lpfc_sli4_cfg_shdr));
1702 1714
1703 return alloc_len; 1715 return alloc_len;
@@ -1737,6 +1749,65 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1737} 1749}
1738 1750
1739/** 1751/**
1752 * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
1753 * @phba: pointer to lpfc hba data structure.
1754 * @fcf_index: index to fcf table.
1755 *
1756 * This routine allocates and constructs a non-embedded mailbox command
1757 * for reading an FCF table entry referred to by @fcf_index.
1758 *
1759 * Return: 0 if the mailbox command is constructed successfully, otherwise
1760 * -ENOMEM.
1761 **/
1762int
1763lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
1764 struct lpfcMboxq *mboxq,
1765 uint16_t fcf_index)
1766{
1767 void *virt_addr;
1768 dma_addr_t phys_addr;
1769 uint8_t *bytep;
1770 struct lpfc_mbx_sge sge;
1771 uint32_t alloc_len, req_len;
1772 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1773
1774 if (!mboxq)
1775 return -ENOMEM;
1776
1777 req_len = sizeof(struct fcf_record) +
1778 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
1779
1780 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
1781 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1782 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
1783 LPFC_SLI4_MBX_NEMBED);
1784
1785 if (alloc_len < req_len) {
1786 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1787 "0291 Allocated DMA memory size (x%x) is "
1788 "less than the requested DMA memory "
1789 "size (x%x)\n", alloc_len, req_len);
1790 return -ENOMEM;
1791 }
1792
1793 /* Get the first SGE entry from the non-embedded DMA memory. This
1794 * routine only uses a single SGE.
1795 */
1796 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1797 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1798 virt_addr = mboxq->sge_array->addr[0];
1799 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1800
1801 /* Set up command fields */
1802 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
1803 /* Perform necessary endian conversion */
1804 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1805 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
1806
1807 return 0;
1808}
1809
1810/**
1740 * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox 1811 * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox
1741 * @mboxq: pointer to lpfc mbox command. 1812 * @mboxq: pointer to lpfc mbox command.
1742 * 1813 *
@@ -1753,11 +1824,6 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1753 /* Set up host requested features. */ 1824 /* Set up host requested features. */
1754 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); 1825 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1755 1826
1756 if (phba->cfg_enable_fip)
1757 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1758 else
1759 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
1760
1761 /* Enable DIF (block guard) only if configured to do so. */ 1827 /* Enable DIF (block guard) only if configured to do so. */
1762 if (phba->cfg_enable_bg) 1828 if (phba->cfg_enable_bg)
1763 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); 1829 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
@@ -1817,6 +1883,9 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1817 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); 1883 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
1818 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); 1884 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1819 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); 1885 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
1886 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
1887 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
1888 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
1820 reg_vfi->bde.addrHigh = putPaddrHigh(phys); 1889 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1821 reg_vfi->bde.addrLow = putPaddrLow(phys); 1890 reg_vfi->bde.addrLow = putPaddrLow(phys);
1822 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); 1891 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
@@ -1850,7 +1919,7 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1850/** 1919/**
1851 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command 1920 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
1852 * @mbox: pointer to lpfc mbox command to initialize. 1921 * @mbox: pointer to lpfc mbox command to initialize.
1853 * @vfi: VFI to be unregistered. 1922 * @vport: vport associated with the VF.
1854 * 1923 *
1855 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric 1924 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
1856 * (logical NPort) into the inactive state. The SLI Host must have logged out 1925 * (logical NPort) into the inactive state. The SLI Host must have logged out
@@ -1859,11 +1928,12 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1859 * fabric inactive. 1928 * fabric inactive.
1860 **/ 1929 **/
1861void 1930void
1862lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi) 1931lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1863{ 1932{
1864 memset(mbox, 0, sizeof(*mbox)); 1933 memset(mbox, 0, sizeof(*mbox));
1865 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); 1934 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
1866 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi); 1935 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
1936 vport->vfi + vport->phba->vfi_base);
1867} 1937}
1868 1938
1869/** 1939/**
@@ -1937,13 +2007,14 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1937 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); 2007 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
1938 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); 2008 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
1939 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); 2009 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
1940 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx); 2010 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
2011 phba->fcf.current_rec.fcf_indx);
1941 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ 2012 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
1942 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, 2013 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
1943 (~phba->fcf.addr_mode) & 0x3); 2014 if (phba->fcf.current_rec.vlan_id != 0xFFFF) {
1944 if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
1945 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); 2015 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
1946 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id); 2016 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2017 phba->fcf.current_rec.vlan_id);
1947 } 2018 }
1948} 2019}
1949 2020
@@ -1983,3 +2054,41 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1983 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); 2054 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
1984 resume_rpi->event_tag = ndlp->phba->fc_eventTag; 2055 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
1985} 2056}
2057
2058/**
2059 * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
2060 * mailbox command.
2061 * @mbox: pointer to lpfc mbox command to initialize.
2062 *
2063 * The PORT_CAPABILITIES supported pages mailbox command is issued to
2064 * retrieve the particular feature pages supported by the port.
2065 **/
2066void
2067lpfc_supported_pages(struct lpfcMboxq *mbox)
2068{
2069 struct lpfc_mbx_supp_pages *supp_pages;
2070
2071 memset(mbox, 0, sizeof(*mbox));
2072 supp_pages = &mbox->u.mqe.un.supp_pages;
2073 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2074 bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
2075}
2076
2077/**
2078 * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
2079 * mailbox command.
2080 * @mbox: pointer to lpfc mbox command to initialize.
2081 *
2082 * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
2083 * retrieve the particular SLI4 features supported by the port.
2084 **/
2085void
2086lpfc_sli4_params(struct lpfcMboxq *mbox)
2087{
2088 struct lpfc_mbx_sli4_params *sli4_params;
2089
2090 memset(mbox, 0, sizeof(*mbox));
2091 sli4_params = &mbox->u.mqe.un.sli4_params;
2092 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2093 bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
2094}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index a1b6db6016da..8f879e477e9d 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -20,6 +20,7 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <linux/mempool.h> 22#include <linux/mempool.h>
23#include <linux/slab.h>
23#include <linux/pci.h> 24#include <linux/pci.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index d655ed3eebef..f3cfbe2ce986 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2008 Emulex. All rights reserved. * 4 * Copyright (C) 2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -177,23 +177,3 @@ struct temp_event {
177 uint32_t data; 177 uint32_t data;
178}; 178};
179 179
180/* bsg definitions */
181#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
182#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
183
184struct set_ct_event {
185 uint32_t command;
186 uint32_t ev_req_id;
187 uint32_t ev_reg_id;
188};
189
190struct get_ct_event {
191 uint32_t command;
192 uint32_t ev_reg_id;
193 uint32_t ev_req_id;
194};
195
196struct get_ct_event_reply {
197 uint32_t immed_data;
198 uint32_t type;
199};
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 3e74136f1ede..e331204a4d56 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
26#include <scsi/scsi.h> 27#include <scsi/scsi.h>
@@ -62,7 +63,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
62 63
63int 64int
64lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 65lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
65 struct serv_parm * sp, uint32_t class) 66 struct serv_parm *sp, uint32_t class, int flogi)
66{ 67{
67 volatile struct serv_parm *hsp = &vport->fc_sparam; 68 volatile struct serv_parm *hsp = &vport->fc_sparam;
68 uint16_t hsp_value, ssp_value = 0; 69 uint16_t hsp_value, ssp_value = 0;
@@ -75,49 +76,56 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
75 * correcting the byte values. 76 * correcting the byte values.
76 */ 77 */
77 if (sp->cls1.classValid) { 78 if (sp->cls1.classValid) {
78 hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) | 79 if (!flogi) {
79 hsp->cls1.rcvDataSizeLsb; 80 hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
80 ssp_value = (sp->cls1.rcvDataSizeMsb << 8) | 81 hsp->cls1.rcvDataSizeLsb);
81 sp->cls1.rcvDataSizeLsb; 82 ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
82 if (!ssp_value) 83 sp->cls1.rcvDataSizeLsb);
83 goto bad_service_param; 84 if (!ssp_value)
84 if (ssp_value > hsp_value) { 85 goto bad_service_param;
85 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; 86 if (ssp_value > hsp_value) {
86 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; 87 sp->cls1.rcvDataSizeLsb =
88 hsp->cls1.rcvDataSizeLsb;
89 sp->cls1.rcvDataSizeMsb =
90 hsp->cls1.rcvDataSizeMsb;
91 }
87 } 92 }
88 } else if (class == CLASS1) { 93 } else if (class == CLASS1)
89 goto bad_service_param; 94 goto bad_service_param;
90 }
91
92 if (sp->cls2.classValid) { 95 if (sp->cls2.classValid) {
93 hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) | 96 if (!flogi) {
94 hsp->cls2.rcvDataSizeLsb; 97 hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
95 ssp_value = (sp->cls2.rcvDataSizeMsb << 8) | 98 hsp->cls2.rcvDataSizeLsb);
96 sp->cls2.rcvDataSizeLsb; 99 ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
97 if (!ssp_value) 100 sp->cls2.rcvDataSizeLsb);
98 goto bad_service_param; 101 if (!ssp_value)
99 if (ssp_value > hsp_value) { 102 goto bad_service_param;
100 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; 103 if (ssp_value > hsp_value) {
101 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; 104 sp->cls2.rcvDataSizeLsb =
105 hsp->cls2.rcvDataSizeLsb;
106 sp->cls2.rcvDataSizeMsb =
107 hsp->cls2.rcvDataSizeMsb;
108 }
102 } 109 }
103 } else if (class == CLASS2) { 110 } else if (class == CLASS2)
104 goto bad_service_param; 111 goto bad_service_param;
105 }
106
107 if (sp->cls3.classValid) { 112 if (sp->cls3.classValid) {
108 hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) | 113 if (!flogi) {
109 hsp->cls3.rcvDataSizeLsb; 114 hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
110 ssp_value = (sp->cls3.rcvDataSizeMsb << 8) | 115 hsp->cls3.rcvDataSizeLsb);
111 sp->cls3.rcvDataSizeLsb; 116 ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
112 if (!ssp_value) 117 sp->cls3.rcvDataSizeLsb);
113 goto bad_service_param; 118 if (!ssp_value)
114 if (ssp_value > hsp_value) { 119 goto bad_service_param;
115 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; 120 if (ssp_value > hsp_value) {
116 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; 121 sp->cls3.rcvDataSizeLsb =
122 hsp->cls3.rcvDataSizeLsb;
123 sp->cls3.rcvDataSizeMsb =
124 hsp->cls3.rcvDataSizeMsb;
125 }
117 } 126 }
118 } else if (class == CLASS3) { 127 } else if (class == CLASS3)
119 goto bad_service_param; 128 goto bad_service_param;
120 }
121 129
122 /* 130 /*
123 * Preserve the upper four bits of the MSB from the PLOGI response. 131 * Preserve the upper four bits of the MSB from the PLOGI response.
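The lpfc_check_sparm() rework above hangs the receive-data-size negotiation on a new flogi flag: an FLOGI exchange only checks class validity, while PLOGI also validates the remote size and clamps it to the host's. One class's check, distilled into a sketch with simplified types:

#include <stdbool.h>
#include <stdint.h>

static bool check_class_sparm(uint16_t host_rcv_sz, uint16_t *remote_rcv_sz,
                              bool class_valid, bool class_required,
                              bool flogi)
{
        if (!class_valid)
                return !class_required; /* missing class fatal only if required */
        if (flogi)
                return true;            /* FLOGI: validity check only */
        if (*remote_rcv_sz == 0)
                return false;           /* zero size is a bad service parameter */
        if (*remote_rcv_sz > host_rcv_sz)
                *remote_rcv_sz = host_rcv_sz; /* clamp to the host's capability */
        return true;
}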
@@ -247,7 +255,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
247 int rc; 255 int rc;
248 256
249 memset(&stat, 0, sizeof (struct ls_rjt)); 257 memset(&stat, 0, sizeof (struct ls_rjt));
250 if (vport->port_state <= LPFC_FLOGI) { 258 if (vport->port_state <= LPFC_FDISC) {
251 /* Before responding to PLOGI, check for pt2pt mode. 259 /* Before responding to PLOGI, check for pt2pt mode.
252 * If we are pt2pt, with an outstanding FLOGI, abort 260 * If we are pt2pt, with an outstanding FLOGI, abort
253 * the FLOGI and resend it first. 261 * the FLOGI and resend it first.
@@ -295,7 +303,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
295 NULL); 303 NULL);
296 return 0; 304 return 0;
297 } 305 }
298 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) { 306 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
299 /* Reject this request because invalid parameters */ 307 /* Reject this request because invalid parameters */
300 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 308 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
301 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 309 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
@@ -831,7 +839,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
831 "0142 PLOGI RSP: Invalid WWN.\n"); 839 "0142 PLOGI RSP: Invalid WWN.\n");
832 goto out; 840 goto out;
833 } 841 }
834 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3)) 842 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
835 goto out; 843 goto out;
836 /* PLOGI chkparm OK */ 844 /* PLOGI chkparm OK */
837 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 845 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -1223,6 +1231,12 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1231 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 1232 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1233 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1234 if (phba->sli_rev == LPFC_SLI_REV4) {
1235 spin_unlock_irq(&phba->hbalock);
1236 lpfc_sli4_free_rpi(phba,
1237 mb->u.mb.un.varRegLogin.rpi);
1238 spin_lock_irq(&phba->hbalock);
1239 }
1226 mp = (struct lpfc_dmabuf *) (mb->context1); 1240 mp = (struct lpfc_dmabuf *) (mb->context1);
1227 if (mp) { 1241 if (mp) {
1228 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 1242 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1230,6 +1244,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1230 } 1244 }
1231 lpfc_nlp_put(ndlp); 1245 lpfc_nlp_put(ndlp);
1232 list_del(&mb->list); 1246 list_del(&mb->list);
1247 phba->sli.mboxq_cnt--;
1233 mempool_free(mb, phba->mbox_mem_pool); 1248 mempool_free(mb, phba->mbox_mem_pool);
1234 } 1249 }
1235 } 1250 }
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c88f59f0ce30..dccdb822328c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -19,6 +19,7 @@
19 * included with this package. * 19 * included with this package. *
20 *******************************************************************/ 20 *******************************************************************/
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/slab.h>
22#include <linux/interrupt.h> 23#include <linux/interrupt.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <asm/unaligned.h> 25#include <asm/unaligned.h>
@@ -59,22 +60,26 @@ static char *dif_op_str[] = {
59}; 60};
60static void 61static void
61lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 62lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
63static void
64lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
62 65
63static void 66static void
64lpfc_debug_save_data(struct scsi_cmnd *cmnd) 67lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
65{ 68{
66 void *src, *dst; 69 void *src, *dst;
67 struct scatterlist *sgde = scsi_sglist(cmnd); 70 struct scatterlist *sgde = scsi_sglist(cmnd);
68 71
69 if (!_dump_buf_data) { 72 if (!_dump_buf_data) {
70 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", 73 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
74 "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
71 __func__); 75 __func__);
72 return; 76 return;
73 } 77 }
74 78
75 79
76 if (!sgde) { 80 if (!sgde) {
77 printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n"); 81 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
82 "9051 BLKGRD: ERROR: data scatterlist is null\n");
78 return; 83 return;
79 } 84 }
80 85
@@ -88,19 +93,21 @@ lpfc_debug_save_data(struct scsi_cmnd *cmnd)
88} 93}
89 94
90static void 95static void
91lpfc_debug_save_dif(struct scsi_cmnd *cmnd) 96lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
92{ 97{
93 void *src, *dst; 98 void *src, *dst;
94 struct scatterlist *sgde = scsi_prot_sglist(cmnd); 99 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
95 100
96 if (!_dump_buf_dif) { 101 if (!_dump_buf_dif) {
97 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", 102 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
103 "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
98 __func__); 104 __func__);
99 return; 105 return;
100 } 106 }
101 107
102 if (!sgde) { 108 if (!sgde) {
103 printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n"); 109 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
110 "9053 BLKGRD: ERROR: prot scatterlist is null\n");
104 return; 111 return;
105 } 112 }
106 113
@@ -242,6 +249,36 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
242} 249}
243 250
244/** 251/**
252 * lpfc_change_queue_depth - Alter scsi device queue depth
 253 * @sdev: Pointer to the scsi device on which to change the queue depth.
254 * @qdepth: New queue depth to set the sdev to.
255 * @reason: The reason for the queue depth change.
256 *
257 * This function is called by the midlayer and the LLD to alter the queue
258 * depth for a scsi device. This function sets the queue depth to the new
259 * value and sends an event out to log the queue depth change.
260 **/
261int
262lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
263{
264 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
265 struct lpfc_hba *phba = vport->phba;
266 struct lpfc_rport_data *rdata;
267 unsigned long new_queue_depth, old_queue_depth;
268
269 old_queue_depth = sdev->queue_depth;
270 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
271 new_queue_depth = sdev->queue_depth;
272 rdata = sdev->hostdata;
273 if (rdata)
274 lpfc_send_sdev_queuedepth_change_event(phba, vport,
275 rdata->pnode, sdev->lun,
276 old_queue_depth,
277 new_queue_depth);
278 return sdev->queue_depth;
279}
280
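
The helper above gives the ramp-up/ramp-down handlers and the midlayer a single entry point for depth changes. A minimal sketch of a caller, assuming only a scsi_device already bound to this host (the wrapper name is hypothetical):

/* Hypothetical caller: drop one LUN's queue depth by one and report
 * the depth the midlayer actually applied. */
static int example_shrink_depth(struct scsi_device *sdev)
{
	/* SCSI_QDEPTH_DEFAULT marks this as an ordinary depth change,
	 * not a queue-full or ramp-up event. */
	return lpfc_change_queue_depth(sdev, sdev->queue_depth - 1,
				       SCSI_QDEPTH_DEFAULT);
}
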
281/**
245 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 282 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
246 * @phba: The Hba for which this call is being executed. 283 * @phba: The Hba for which this call is being executed.
247 * 284 *
@@ -305,8 +342,10 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
305 if (vport->cfg_lun_queue_depth <= queue_depth) 342 if (vport->cfg_lun_queue_depth <= queue_depth)
306 return; 343 return;
307 spin_lock_irqsave(&phba->hbalock, flags); 344 spin_lock_irqsave(&phba->hbalock, flags);
308 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) || 345 if (time_before(jiffies,
309 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) { 346 phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
347 time_before(jiffies,
348 phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
310 spin_unlock_irqrestore(&phba->hbalock, flags); 349 spin_unlock_irqrestore(&phba->hbalock, flags);
311 return; 350 return;
312 } 351 }
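
The rewrite to time_before() is the wrap-safe way to express this hold-off: jiffies is an unsigned counter that wraps, and the old open-coded form `last + interval > jiffies` inverts its meaning near the wrap point. A minimal sketch of the idiom, reusing the interval macro from this file:

#include <linux/jiffies.h>

/* Wrap-safe hold-off test: true while we are still inside the window
 * that starts at last_event and lasts QUEUE_RAMP_UP_INTERVAL ticks. */
static inline bool ramp_up_held_off(unsigned long last_event)
{
	return time_before(jiffies, last_event + QUEUE_RAMP_UP_INTERVAL);
}
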
@@ -338,10 +377,9 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
338 struct lpfc_vport **vports; 377 struct lpfc_vport **vports;
339 struct Scsi_Host *shost; 378 struct Scsi_Host *shost;
340 struct scsi_device *sdev; 379 struct scsi_device *sdev;
341 unsigned long new_queue_depth, old_queue_depth; 380 unsigned long new_queue_depth;
342 unsigned long num_rsrc_err, num_cmd_success; 381 unsigned long num_rsrc_err, num_cmd_success;
343 int i; 382 int i;
344 struct lpfc_rport_data *rdata;
345 383
346 num_rsrc_err = atomic_read(&phba->num_rsrc_err); 384 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
347 num_cmd_success = atomic_read(&phba->num_cmd_success); 385 num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -359,22 +397,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
359 else 397 else
360 new_queue_depth = sdev->queue_depth - 398 new_queue_depth = sdev->queue_depth -
361 new_queue_depth; 399 new_queue_depth;
362 old_queue_depth = sdev->queue_depth; 400 lpfc_change_queue_depth(sdev, new_queue_depth,
363 if (sdev->ordered_tags) 401 SCSI_QDEPTH_DEFAULT);
364 scsi_adjust_queue_depth(sdev,
365 MSG_ORDERED_TAG,
366 new_queue_depth);
367 else
368 scsi_adjust_queue_depth(sdev,
369 MSG_SIMPLE_TAG,
370 new_queue_depth);
371 rdata = sdev->hostdata;
372 if (rdata)
373 lpfc_send_sdev_queuedepth_change_event(
374 phba, vports[i],
375 rdata->pnode,
376 sdev->lun, old_queue_depth,
377 new_queue_depth);
378 } 402 }
379 } 403 }
380 lpfc_destroy_vport_work_array(phba, vports); 404 lpfc_destroy_vport_work_array(phba, vports);
@@ -398,7 +422,6 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
398 struct Scsi_Host *shost; 422 struct Scsi_Host *shost;
399 struct scsi_device *sdev; 423 struct scsi_device *sdev;
400 int i; 424 int i;
401 struct lpfc_rport_data *rdata;
402 425
403 vports = lpfc_create_vport_work_array(phba); 426 vports = lpfc_create_vport_work_array(phba);
404 if (vports != NULL) 427 if (vports != NULL)
@@ -408,22 +431,9 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
408 if (vports[i]->cfg_lun_queue_depth <= 431 if (vports[i]->cfg_lun_queue_depth <=
409 sdev->queue_depth) 432 sdev->queue_depth)
410 continue; 433 continue;
411 if (sdev->ordered_tags) 434 lpfc_change_queue_depth(sdev,
412 scsi_adjust_queue_depth(sdev, 435 sdev->queue_depth+1,
413 MSG_ORDERED_TAG, 436 SCSI_QDEPTH_RAMP_UP);
414 sdev->queue_depth+1);
415 else
416 scsi_adjust_queue_depth(sdev,
417 MSG_SIMPLE_TAG,
418 sdev->queue_depth+1);
419 rdata = sdev->hostdata;
420 if (rdata)
421 lpfc_send_sdev_queuedepth_change_event(
422 phba, vports[i],
423 rdata->pnode,
424 sdev->lun,
425 sdev->queue_depth - 1,
426 sdev->queue_depth);
427 } 437 }
428 } 438 }
429 lpfc_destroy_vport_work_array(phba, vports); 439 lpfc_destroy_vport_work_array(phba, vports);
@@ -589,7 +599,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
589 iocb->ulpClass = CLASS3; 599 iocb->ulpClass = CLASS3;
590 psb->status = IOSTAT_SUCCESS; 600 psb->status = IOSTAT_SUCCESS;
591 /* Put it back into the SCSI buffer list */ 601 /* Put it back into the SCSI buffer list */
592 lpfc_release_scsi_buf_s4(phba, psb); 602 lpfc_release_scsi_buf_s3(phba, psb);
593 603
594 } 604 }
595 605
@@ -611,22 +621,40 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
611 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 621 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
612 struct lpfc_scsi_buf *psb, *next_psb; 622 struct lpfc_scsi_buf *psb, *next_psb;
613 unsigned long iflag = 0; 623 unsigned long iflag = 0;
624 struct lpfc_iocbq *iocbq;
625 int i;
614 626
615 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); 627 spin_lock_irqsave(&phba->hbalock, iflag);
628 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
616 list_for_each_entry_safe(psb, next_psb, 629 list_for_each_entry_safe(psb, next_psb,
617 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { 630 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
618 if (psb->cur_iocbq.sli4_xritag == xri) { 631 if (psb->cur_iocbq.sli4_xritag == xri) {
619 list_del(&psb->list); 632 list_del(&psb->list);
633 psb->exch_busy = 0;
620 psb->status = IOSTAT_SUCCESS; 634 psb->status = IOSTAT_SUCCESS;
621 spin_unlock_irqrestore( 635 spin_unlock(
622 &phba->sli4_hba.abts_scsi_buf_list_lock, 636 &phba->sli4_hba.abts_scsi_buf_list_lock);
623 iflag); 637 spin_unlock_irqrestore(&phba->hbalock, iflag);
624 lpfc_release_scsi_buf_s4(phba, psb); 638 lpfc_release_scsi_buf_s4(phba, psb);
625 return; 639 return;
626 } 640 }
627 } 641 }
628 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 642 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
629 iflag); 643 for (i = 1; i <= phba->sli.last_iotag; i++) {
644 iocbq = phba->sli.iocbq_lookup[i];
645
646 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
647 (iocbq->iocb_flag & LPFC_IO_LIBDFC))
648 continue;
649 if (iocbq->sli4_xritag != xri)
650 continue;
651 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
652 psb->exch_busy = 0;
653 spin_unlock_irqrestore(&phba->hbalock, iflag);
654 return;
655
656 }
657 spin_unlock_irqrestore(&phba->hbalock, iflag);
630} 658}
631 659
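
The new fallback loop covers the case where the aborted XRI belongs to an I/O that never reached the aborted-buffer list: it walks the iotag lookup table and clears exch_busy on the matching FCP command. A reduced sketch of the matching test, using only the fields visible in the hunk:

/* Sketch: is this iocbq the in-flight FCP exchange the XRI_ABORTED
 * event refers to? LIBDFC (driver-internal) requests are skipped. */
static bool iocbq_matches_xri(struct lpfc_iocbq *iocbq, uint16_t xri)
{
	if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
	    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
		return false;
	return iocbq->sli4_xritag == xri;
}
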
632/** 660/**
@@ -679,11 +707,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
679 list); 707 list);
680 if (status) { 708 if (status) {
681 /* Put this back on the abort scsi list */ 709 /* Put this back on the abort scsi list */
682 psb->status = IOSTAT_LOCAL_REJECT; 710 psb->exch_busy = 1;
683 psb->result = IOERR_ABORT_REQUESTED;
684 rc++; 711 rc++;
685 } else 712 } else {
713 psb->exch_busy = 0;
686 psb->status = IOSTAT_SUCCESS; 714 psb->status = IOSTAT_SUCCESS;
715 }
687 /* Put it back into the SCSI buffer list */ 716 /* Put it back into the SCSI buffer list */
688 lpfc_release_scsi_buf_s4(phba, psb); 717 lpfc_release_scsi_buf_s4(phba, psb);
689 } 718 }
@@ -787,19 +816,17 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
787 */ 816 */
788 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 817 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
789 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 818 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
790 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
791 bf_set(lpfc_sli4_sge_last, sgl, 0); 819 bf_set(lpfc_sli4_sge_last, sgl, 0);
792 sgl->word2 = cpu_to_le32(sgl->word2); 820 sgl->word2 = cpu_to_le32(sgl->word2);
793 sgl->word3 = cpu_to_le32(sgl->word3); 821 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
794 sgl++; 822 sgl++;
795 823
796 /* Setup the physical region for the FCP RSP */ 824 /* Setup the physical region for the FCP RSP */
797 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); 825 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
798 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); 826 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
799 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
800 bf_set(lpfc_sli4_sge_last, sgl, 1); 827 bf_set(lpfc_sli4_sge_last, sgl, 1);
801 sgl->word2 = cpu_to_le32(sgl->word2); 828 sgl->word2 = cpu_to_le32(sgl->word2);
802 sgl->word3 = cpu_to_le32(sgl->word3); 829 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
803 830
804 /* 831 /*
805 * Since the IOCB for the FCP I/O is built into this 832 * Since the IOCB for the FCP I/O is built into this
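
The sge_len change in this hunk reflects the SLI4 SGE layout: the length is a full 32-bit little-endian word of its own, not a bitfield packed into word2/word3, so it is stored directly rather than through bf_set(). A sketch of one corrected SGE fill, with paddr, len, and is_last standing in for the values computed above:

/* Sketch: one SLI4 SGE entry. Only word2 carries bitfields; the
 * length word is converted and written whole. */
sgl->addr_hi = cpu_to_le32(putPaddrHigh(paddr));
sgl->addr_lo = cpu_to_le32(putPaddrLow(paddr));
bf_set(lpfc_sli4_sge_last, sgl, is_last);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(len);
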
@@ -830,11 +857,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
830 psb->cur_iocbq.sli4_xritag); 857 psb->cur_iocbq.sli4_xritag);
831 if (status) { 858 if (status) {
832 /* Put this back on the abort scsi list */ 859 /* Put this back on the abort scsi list */
833 psb->status = IOSTAT_LOCAL_REJECT; 860 psb->exch_busy = 1;
834 psb->result = IOERR_ABORT_REQUESTED;
835 rc++; 861 rc++;
836 } else 862 } else {
863 psb->exch_busy = 0;
837 psb->status = IOSTAT_SUCCESS; 864 psb->status = IOSTAT_SUCCESS;
865 }
838 /* Put it back into the SCSI buffer list */ 866 /* Put it back into the SCSI buffer list */
839 lpfc_release_scsi_buf_s4(phba, psb); 867 lpfc_release_scsi_buf_s4(phba, psb);
840 break; 868 break;
@@ -848,11 +876,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
848 list); 876 list);
849 if (status) { 877 if (status) {
850 /* Put this back on the abort scsi list */ 878 /* Put this back on the abort scsi list */
851 psb->status = IOSTAT_LOCAL_REJECT; 879 psb->exch_busy = 1;
852 psb->result = IOERR_ABORT_REQUESTED;
853 rc++; 880 rc++;
854 } else 881 } else {
882 psb->exch_busy = 0;
855 psb->status = IOSTAT_SUCCESS; 883 psb->status = IOSTAT_SUCCESS;
884 }
856 /* Put it back into the SCSI buffer list */ 885 /* Put it back into the SCSI buffer list */
857 lpfc_release_scsi_buf_s4(phba, psb); 886 lpfc_release_scsi_buf_s4(phba, psb);
858 } 887 }
@@ -942,8 +971,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
942{ 971{
943 unsigned long iflag = 0; 972 unsigned long iflag = 0;
944 973
945 if (psb->status == IOSTAT_LOCAL_REJECT 974 if (psb->exch_busy) {
946 && psb->result == IOERR_ABORT_REQUESTED) {
947 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, 975 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
948 iflag); 976 iflag);
949 psb->pCmd = NULL; 977 psb->pCmd = NULL;
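
With exch_busy carried on the buffer itself, the release path no longer infers "exchange still owned by the HBA" from the IOSTAT_LOCAL_REJECT/IOERR_ABORT_REQUESTED pair. The decision reduces to the sketch below; both helpers are hypothetical stand-ins for the list handling in this hunk:

/* Sketch: a busy exchange parks on the aborted list until the HBA's
 * XRI_ABORTED event releases it; a clean one goes straight back. */
if (psb->exch_busy)
	park_on_abts_list(phba, psb);    /* hypothetical helper */
else
	return_to_free_list(phba, psb);  /* hypothetical helper */
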
@@ -996,6 +1024,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
996 struct scatterlist *sgel = NULL; 1024 struct scatterlist *sgel = NULL;
997 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1025 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
998 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 1026 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1027 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
999 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1028 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1000 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 1029 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1001 dma_addr_t physaddr; 1030 dma_addr_t physaddr;
@@ -1024,7 +1053,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1024 1053
1025 lpfc_cmd->seg_cnt = nseg; 1054 lpfc_cmd->seg_cnt = nseg;
1026 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1055 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1027 printk(KERN_ERR "%s: Too many sg segments from " 1056 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1057 "9064 BLKGRD: %s: Too many sg segments from "
1028 "dma_map_sg. Config %d, seg_cnt %d\n", 1058 "dma_map_sg. Config %d, seg_cnt %d\n",
1029 __func__, phba->cfg_sg_seg_cnt, 1059 __func__, phba->cfg_sg_seg_cnt,
1030 lpfc_cmd->seg_cnt); 1060 lpfc_cmd->seg_cnt);
@@ -1045,6 +1075,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1045 physaddr = sg_dma_address(sgel); 1075 physaddr = sg_dma_address(sgel);
1046 if (phba->sli_rev == 3 && 1076 if (phba->sli_rev == 3 &&
1047 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1077 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1078 !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1048 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 1079 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1049 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1080 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1050 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 1081 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -1071,7 +1102,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1071 * explicitly reinitialized since all iocb memory resources are reused. 1102 * explicitly reinitialized since all iocb memory resources are reused.
1072 */ 1103 */
1073 if (phba->sli_rev == 3 && 1104 if (phba->sli_rev == 3 &&
1074 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 1105 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1106 !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
1075 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 1107 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1076 /* 1108 /*
1077 * The extended IOCB format can only fit 3 BDE or a BPL. 1109 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -1096,6 +1128,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1096 } else { 1128 } else {
1097 iocb_cmd->un.fcpi64.bdl.bdeSize = 1129 iocb_cmd->un.fcpi64.bdl.bdeSize =
1098 ((num_bde + 2) * sizeof(struct ulp_bde64)); 1130 ((num_bde + 2) * sizeof(struct ulp_bde64));
1131 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1099 } 1132 }
1100 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 1133 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1101 1134
@@ -1112,7 +1145,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1112 * with the cmd 1145 * with the cmd
1113 */ 1146 */
1114static int 1147static int
1115lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) 1148lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1116{ 1149{
1117 uint8_t guard_type = scsi_host_get_guard(sc->device->host); 1150 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1118 uint8_t ret_prof = LPFC_PROF_INVALID; 1151 uint8_t ret_prof = LPFC_PROF_INVALID;
@@ -1136,7 +1169,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
1136 1169
1137 case SCSI_PROT_NORMAL: 1170 case SCSI_PROT_NORMAL:
1138 default: 1171 default:
1139 printk(KERN_ERR "Bad op/guard:%d/%d combination\n", 1172 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1173 "9063 BLKGRD:Bad op/guard:%d/%d combination\n",
1140 scsi_get_prot_op(sc), guard_type); 1174 scsi_get_prot_op(sc), guard_type);
1141 break; 1175 break;
1142 1176
@@ -1157,7 +1191,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
1157 case SCSI_PROT_WRITE_STRIP: 1191 case SCSI_PROT_WRITE_STRIP:
1158 case SCSI_PROT_NORMAL: 1192 case SCSI_PROT_NORMAL:
1159 default: 1193 default:
1160 printk(KERN_ERR "Bad op/guard:%d/%d combination\n", 1194 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1195 "9075 BLKGRD: Bad op/guard:%d/%d combination\n",
1161 scsi_get_prot_op(sc), guard_type); 1196 scsi_get_prot_op(sc), guard_type);
1162 break; 1197 break;
1163 } 1198 }
@@ -1259,7 +1294,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1259 uint16_t apptagmask, apptagval; 1294 uint16_t apptagmask, apptagval;
1260 1295
1261 pde1 = (struct lpfc_pde *) bpl; 1296 pde1 = (struct lpfc_pde *) bpl;
1262 prof = lpfc_sc_to_sli_prof(sc); 1297 prof = lpfc_sc_to_sli_prof(phba, sc);
1263 1298
1264 if (prof == LPFC_PROF_INVALID) 1299 if (prof == LPFC_PROF_INVALID)
1265 goto out; 1300 goto out;
@@ -1359,7 +1394,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1359 return 0; 1394 return 0;
1360 } 1395 }
1361 1396
1362 prof = lpfc_sc_to_sli_prof(sc); 1397 prof = lpfc_sc_to_sli_prof(phba, sc);
1363 if (prof == LPFC_PROF_INVALID) 1398 if (prof == LPFC_PROF_INVALID)
1364 goto out; 1399 goto out;
1365 1400
@@ -1408,7 +1443,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1408 subtotal = 0; /* total bytes processed for current prot grp */ 1443 subtotal = 0; /* total bytes processed for current prot grp */
1409 while (!pgdone) { 1444 while (!pgdone) {
1410 if (!sgde) { 1445 if (!sgde) {
1411 printk(KERN_ERR "%s Invalid data segment\n", 1446 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1447 "9065 BLKGRD:%s Invalid data segment\n",
1412 __func__); 1448 __func__);
1413 return 0; 1449 return 0;
1414 } 1450 }
@@ -1462,7 +1498,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1462 reftag += protgrp_blks; 1498 reftag += protgrp_blks;
1463 } else { 1499 } else {
1464 /* if we're here, we have a bug */ 1500 /* if we're here, we have a bug */
1465 printk(KERN_ERR "BLKGRD: bug in %s\n", __func__); 1501 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1502 "9054 BLKGRD: bug in %s\n", __func__);
1466 } 1503 }
1467 1504
1468 } while (!alldone); 1505 } while (!alldone);
@@ -1544,8 +1581,10 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1544 1581
1545 lpfc_cmd->seg_cnt = datasegcnt; 1582 lpfc_cmd->seg_cnt = datasegcnt;
1546 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1583 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1547 printk(KERN_ERR "%s: Too many sg segments from " 1584 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1548 "dma_map_sg. Config %d, seg_cnt %d\n", 1585 "9067 BLKGRD: %s: Too many sg segments"
1586 " from dma_map_sg. Config %d, seg_cnt"
1587 " %d\n",
1549 __func__, phba->cfg_sg_seg_cnt, 1588 __func__, phba->cfg_sg_seg_cnt,
1550 lpfc_cmd->seg_cnt); 1589 lpfc_cmd->seg_cnt);
1551 scsi_dma_unmap(scsi_cmnd); 1590 scsi_dma_unmap(scsi_cmnd);
@@ -1558,7 +1597,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1558 case LPFC_PG_TYPE_NO_DIF: 1597 case LPFC_PG_TYPE_NO_DIF:
1559 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 1598 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
1560 datasegcnt); 1599 datasegcnt);
1561 /* we shoud have 2 or more entries in buffer list */ 1600 /* we should have 2 or more entries in buffer list */
1562 if (num_bde < 2) 1601 if (num_bde < 2)
1563 goto err; 1602 goto err;
1564 break; 1603 break;
@@ -1579,8 +1618,9 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1579 lpfc_cmd->prot_seg_cnt = protsegcnt; 1618 lpfc_cmd->prot_seg_cnt = protsegcnt;
1580 if (lpfc_cmd->prot_seg_cnt 1619 if (lpfc_cmd->prot_seg_cnt
1581 > phba->cfg_prot_sg_seg_cnt) { 1620 > phba->cfg_prot_sg_seg_cnt) {
1582 printk(KERN_ERR "%s: Too many prot sg segments " 1621 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1583 "from dma_map_sg. Config %d," 1622 "9068 BLKGRD: %s: Too many prot sg "
1623 "segments from dma_map_sg. Config %d,"
1584 "prot_seg_cnt %d\n", __func__, 1624 "prot_seg_cnt %d\n", __func__,
1585 phba->cfg_prot_sg_seg_cnt, 1625 phba->cfg_prot_sg_seg_cnt,
1586 lpfc_cmd->prot_seg_cnt); 1626 lpfc_cmd->prot_seg_cnt);
@@ -1594,7 +1634,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1594 1634
1595 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 1635 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
1596 datasegcnt, protsegcnt); 1636 datasegcnt, protsegcnt);
1597 /* we shoud have 3 or more entries in buffer list */ 1637 /* we should have 3 or more entries in buffer list */
1598 if (num_bde < 3) 1638 if (num_bde < 3)
1599 goto err; 1639 goto err;
1600 break; 1640 break;
@@ -1671,23 +1711,26 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1671 uint32_t bgstat = bgf->bgstat; 1711 uint32_t bgstat = bgf->bgstat;
1672 uint64_t failing_sector = 0; 1712 uint64_t failing_sector = 0;
1673 1713
1674 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x " 1714 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
1715 " 0x%x lba 0x%llx blk cnt 0x%x "
1675 "bgstat=0x%x bghm=0x%x\n", 1716 "bgstat=0x%x bghm=0x%x\n",
1676 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1717 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1677 blk_rq_sectors(cmd->request), bgstat, bghm); 1718 blk_rq_sectors(cmd->request), bgstat, bghm);
1678 1719
1679 spin_lock(&_dump_buf_lock); 1720 spin_lock(&_dump_buf_lock);
1680 if (!_dump_buf_done) { 1721 if (!_dump_buf_done) {
1681 printk(KERN_ERR "Saving Data for %u blocks to debugfs\n", 1722 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
1723 " Data for %u blocks to debugfs\n",
1682 (cmd->cmnd[7] << 8 | cmd->cmnd[8])); 1724 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1683 lpfc_debug_save_data(cmd); 1725 lpfc_debug_save_data(phba, cmd);
1684 1726
1685 /* If we have a prot sgl, save the DIF buffer */ 1727 /* If we have a prot sgl, save the DIF buffer */
1686 if (lpfc_prot_group_type(phba, cmd) == 1728 if (lpfc_prot_group_type(phba, cmd) ==
1687 LPFC_PG_TYPE_DIF_BUF) { 1729 LPFC_PG_TYPE_DIF_BUF) {
1688 printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n", 1730 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
1689 (cmd->cmnd[7] << 8 | cmd->cmnd[8])); 1731 "Saving DIF for %u blocks to debugfs\n",
1690 lpfc_debug_save_dif(cmd); 1732 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1733 lpfc_debug_save_dif(phba, cmd);
1691 } 1734 }
1692 1735
1693 _dump_buf_done = 1; 1736 _dump_buf_done = 1;
@@ -1696,15 +1739,17 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1696 1739
1697 if (lpfc_bgs_get_invalid_prof(bgstat)) { 1740 if (lpfc_bgs_get_invalid_prof(bgstat)) {
1698 cmd->result = ScsiResult(DID_ERROR, 0); 1741 cmd->result = ScsiResult(DID_ERROR, 0);
1699 printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n", 1742 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
1700 bgstat); 1743 " BlockGuard profile. bgstat:0x%x\n",
1744 bgstat);
1701 ret = (-1); 1745 ret = (-1);
1702 goto out; 1746 goto out;
1703 } 1747 }
1704 1748
1705 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 1749 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1706 cmd->result = ScsiResult(DID_ERROR, 0); 1750 cmd->result = ScsiResult(DID_ERROR, 0);
1707 printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n", 1751 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
1752 "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1708 bgstat); 1753 bgstat);
1709 ret = (-1); 1754 ret = (-1);
1710 goto out; 1755 goto out;
@@ -1718,7 +1763,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1718 cmd->result = DRIVER_SENSE << 24 1763 cmd->result = DRIVER_SENSE << 24
1719 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1764 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1720 phba->bg_guard_err_cnt++; 1765 phba->bg_guard_err_cnt++;
1721 printk(KERN_ERR "BLKGRD: guard_tag error\n"); 1766 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1767 "9055 BLKGRD: guard_tag error\n");
1722 } 1768 }
1723 1769
1724 if (lpfc_bgs_get_reftag_err(bgstat)) { 1770 if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -1730,7 +1776,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1730 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1776 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1731 1777
1732 phba->bg_reftag_err_cnt++; 1778 phba->bg_reftag_err_cnt++;
1733 printk(KERN_ERR "BLKGRD: ref_tag error\n"); 1779 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1780 "9056 BLKGRD: ref_tag error\n");
1734 } 1781 }
1735 1782
1736 if (lpfc_bgs_get_apptag_err(bgstat)) { 1783 if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -1742,7 +1789,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1742 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1789 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1743 1790
1744 phba->bg_apptag_err_cnt++; 1791 phba->bg_apptag_err_cnt++;
1745 printk(KERN_ERR "BLKGRD: app_tag error\n"); 1792 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1793 "9061 BLKGRD: app_tag error\n");
1746 } 1794 }
1747 1795
1748 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 1796 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -1763,7 +1811,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1763 if (!ret) { 1811 if (!ret) {
1764 /* No error was reported - problem in FW? */ 1812 /* No error was reported - problem in FW? */
1765 cmd->result = ScsiResult(DID_ERROR, 0); 1813 cmd->result = ScsiResult(DID_ERROR, 0);
1766 printk(KERN_ERR "BLKGRD: no errors reported!\n"); 1814 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1815 "9057 BLKGRD: no errors reported!\n");
1767 } 1816 }
1768 1817
1769out: 1818out:
@@ -1822,9 +1871,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1822 1871
1823 lpfc_cmd->seg_cnt = nseg; 1872 lpfc_cmd->seg_cnt = nseg;
1824 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1873 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1825 printk(KERN_ERR "%s: Too many sg segments from " 1874 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
1826 "dma_map_sg. Config %d, seg_cnt %d\n", 1875 " %s: Too many sg segments from "
1827 __func__, phba->cfg_sg_seg_cnt, 1876 "dma_map_sg. Config %d, seg_cnt %d\n",
1877 __func__, phba->cfg_sg_seg_cnt,
1828 lpfc_cmd->seg_cnt); 1878 lpfc_cmd->seg_cnt);
1829 scsi_dma_unmap(scsi_cmnd); 1879 scsi_dma_unmap(scsi_cmnd);
1830 return 1; 1880 return 1;
@@ -1842,7 +1892,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1842 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 1892 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1843 physaddr = sg_dma_address(sgel); 1893 physaddr = sg_dma_address(sgel);
1844 dma_len = sg_dma_len(sgel); 1894 dma_len = sg_dma_len(sgel);
1845 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1846 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 1895 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1847 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 1896 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1848 if ((num_bde + 1) == nseg) 1897 if ((num_bde + 1) == nseg)
@@ -1851,7 +1900,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1851 bf_set(lpfc_sli4_sge_last, sgl, 0); 1900 bf_set(lpfc_sli4_sge_last, sgl, 0);
1852 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 1901 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1853 sgl->word2 = cpu_to_le32(sgl->word2); 1902 sgl->word2 = cpu_to_le32(sgl->word2);
1854 sgl->word3 = cpu_to_le32(sgl->word3); 1903 sgl->sge_len = cpu_to_le32(dma_len);
1855 dma_offset += dma_len; 1904 dma_offset += dma_len;
1856 sgl++; 1905 sgl++;
1857 } 1906 }
@@ -2050,6 +2099,31 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2050 goto out; 2099 goto out;
2051 } 2100 }
2052 2101
2102 if (resp_info & RSP_LEN_VALID) {
2103 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2104 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
2105 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2106 "2719 Invalid response length: "
2107 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
2108 cmnd->device->id,
2109 cmnd->device->lun, cmnd->cmnd[0],
2110 rsplen);
2111 host_status = DID_ERROR;
2112 goto out;
2113 }
2114 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
2115 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2116 "2757 Protocol failure detected during "
2117 "processing of FCP I/O op: "
2118 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
2119 cmnd->device->id,
2120 cmnd->device->lun, cmnd->cmnd[0],
2121 fcprsp->rspInfo3);
2122 host_status = DID_ERROR;
2123 goto out;
2124 }
2125 }
2126
2053 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 2127 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
2054 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 2128 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
2055 if (snslen > SCSI_SENSE_BUFFERSIZE) 2129 if (snslen > SCSI_SENSE_BUFFERSIZE)
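
Moving the RSP_LEN_VALID checks ahead of the sense-data handling means a malformed FCP_RSP is rejected before its sense buffer is trusted. The FCP spec only permits response-info lengths of 0, 4, or 8 bytes, which is what the new test encodes; a minimal restatement:

/* Sketch: the only legal FCP_RSP response-info lengths are 0, 4, 8. */
static bool fcp_rsplen_ok(uint32_t rsplen)
{
	return rsplen == 0 || rsplen == 4 || rsplen == 8;
}
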
@@ -2074,15 +2148,6 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2074 be32_to_cpu(fcprsp->rspRspLen), 2148 be32_to_cpu(fcprsp->rspRspLen),
2075 fcprsp->rspInfo3); 2149 fcprsp->rspInfo3);
2076 2150
2077 if (resp_info & RSP_LEN_VALID) {
2078 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2079 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
2080 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
2081 host_status = DID_ERROR;
2082 goto out;
2083 }
2084 }
2085
2086 scsi_set_resid(cmnd, 0); 2151 scsi_set_resid(cmnd, 0);
2087 if (resp_info & RESID_UNDER) { 2152 if (resp_info & RESID_UNDER) {
2088 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 2153 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
@@ -2180,7 +2245,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2180 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2245 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2181 int result; 2246 int result;
2182 struct scsi_device *tmp_sdev; 2247 struct scsi_device *tmp_sdev;
2183 int depth = 0; 2248 int depth;
2184 unsigned long flags; 2249 unsigned long flags;
2185 struct lpfc_fast_path_event *fast_path_evt; 2250 struct lpfc_fast_path_event *fast_path_evt;
2186 struct Scsi_Host *shost = cmd->device->host; 2251 struct Scsi_Host *shost = cmd->device->host;
@@ -2188,6 +2253,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2188 2253
2189 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 2254 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
2190 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 2255 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
 2256 /* pick up SLI4 exchange busy status from HBA */
2257 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
2258
2191 if (pnode && NLP_CHK_NODE_ACT(pnode)) 2259 if (pnode && NLP_CHK_NODE_ACT(pnode))
2192 atomic_dec(&pnode->cmd_pending); 2260 atomic_dec(&pnode->cmd_pending);
2193 2261
@@ -2264,7 +2332,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2264 lpfc_printf_vlog(vport, KERN_WARNING, 2332 lpfc_printf_vlog(vport, KERN_WARNING,
2265 LOG_BG, 2333 LOG_BG,
2266 "9031 non-zero BGSTAT " 2334 "9031 non-zero BGSTAT "
2267 "on unprotected cmd"); 2335 "on unprotected cmd\n");
2268 } 2336 }
2269 } 2337 }
2270 2338
@@ -2347,67 +2415,29 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2347 return; 2415 return;
2348 } 2416 }
2349 2417
2350
2351 if (!result) 2418 if (!result)
2352 lpfc_rampup_queue_depth(vport, queue_depth); 2419 lpfc_rampup_queue_depth(vport, queue_depth);
2353 2420
2354 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
2355 ((jiffies - pnode->last_ramp_up_time) >
2356 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
2357 ((jiffies - pnode->last_q_full_time) >
2358 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
2359 (vport->cfg_lun_queue_depth > queue_depth)) {
2360 shost_for_each_device(tmp_sdev, shost) {
2361 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
2362 if (tmp_sdev->id != scsi_id)
2363 continue;
2364 if (tmp_sdev->ordered_tags)
2365 scsi_adjust_queue_depth(tmp_sdev,
2366 MSG_ORDERED_TAG,
2367 tmp_sdev->queue_depth+1);
2368 else
2369 scsi_adjust_queue_depth(tmp_sdev,
2370 MSG_SIMPLE_TAG,
2371 tmp_sdev->queue_depth+1);
2372
2373 pnode->last_ramp_up_time = jiffies;
2374 }
2375 }
2376 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
2377 0xFFFFFFFF,
2378 queue_depth , queue_depth + 1);
2379 }
2380
2381 /* 2421 /*
2382 * Check for queue full. If the lun is reporting queue full, then 2422 * Check for queue full. If the lun is reporting queue full, then
2383 * back off the lun queue depth to prevent target overloads. 2423 * back off the lun queue depth to prevent target overloads.
2384 */ 2424 */
2385 if (result == SAM_STAT_TASK_SET_FULL && pnode && 2425 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
2386 NLP_CHK_NODE_ACT(pnode)) { 2426 NLP_CHK_NODE_ACT(pnode)) {
2387 pnode->last_q_full_time = jiffies;
2388
2389 shost_for_each_device(tmp_sdev, shost) { 2427 shost_for_each_device(tmp_sdev, shost) {
2390 if (tmp_sdev->id != scsi_id) 2428 if (tmp_sdev->id != scsi_id)
2391 continue; 2429 continue;
2392 depth = scsi_track_queue_full(tmp_sdev, 2430 depth = scsi_track_queue_full(tmp_sdev,
2393 tmp_sdev->queue_depth - 1); 2431 tmp_sdev->queue_depth-1);
2394 } 2432 if (depth <= 0)
2395 /* 2433 continue;
2396 * The queue depth cannot be lowered any more.
2397 * Modify the returned error code to store
2398 * the final depth value set by
2399 * scsi_track_queue_full.
2400 */
2401 if (depth == -1)
2402 depth = shost->cmd_per_lun;
2403
2404 if (depth) {
2405 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 2434 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2406 "0711 detected queue full - lun queue " 2435 "0711 detected queue full - lun queue "
2407 "depth adjusted to %d.\n", depth); 2436 "depth adjusted to %d.\n", depth);
2408 lpfc_send_sdev_queuedepth_change_event(phba, vport, 2437 lpfc_send_sdev_queuedepth_change_event(phba, vport,
2409 pnode, 0xFFFFFFFF, 2438 pnode,
2410 depth+1, depth); 2439 tmp_sdev->lun,
2440 depth+1, depth);
2411 } 2441 }
2412 } 2442 }
2413 2443
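
The simplified queue-full path leans on scsi_track_queue_full() directly: per the hunk above, only a positive return (the depth it actually set) is logged and reported, while a non-positive return means the device was left alone. A sketch of that contract, with the notifier as a hypothetical stand-in for the vlog and event calls:

/* Sketch: back off one device after TASK SET FULL; act only when
 * scsi_track_queue_full() reports a depth it actually applied. */
int depth = scsi_track_queue_full(sdev, sdev->queue_depth - 1);
if (depth > 0)
	notify_depth_change(sdev, depth + 1, depth);  /* hypothetical */
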
@@ -2642,6 +2672,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2642 } 2672 }
2643 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; 2673 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2644 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; 2674 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2675 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2645 return 0; 2676 return 0;
2646} 2677}
2647 2678
@@ -2700,6 +2731,13 @@ lpfc_info(struct Scsi_Host *host)
2700 " port %s", 2731 " port %s",
2701 phba->Port); 2732 phba->Port);
2702 } 2733 }
2734 len = strlen(lpfcinfobuf);
2735 if (phba->sli4_hba.link_state.logical_speed) {
2736 snprintf(lpfcinfobuf + len,
2737 384-len,
2738 " Logical Link Speed: %d Mbps",
2739 phba->sli4_hba.link_state.logical_speed * 10);
2740 }
2703 } 2741 }
2704 return lpfcinfobuf; 2742 return lpfcinfobuf;
2705} 2743}
@@ -2745,7 +2783,9 @@ void lpfc_poll_timeout(unsigned long ptr)
2745 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 2783 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2746 2784
2747 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2785 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2748 lpfc_sli_poll_fcp_ring (phba); 2786 lpfc_sli_handle_fast_ring_event(phba,
2787 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
2788
2749 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 2789 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2750 lpfc_poll_rearm_timer(phba); 2790 lpfc_poll_rearm_timer(phba);
2751 } 2791 }
@@ -2771,7 +2811,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2771 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2811 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2772 struct lpfc_hba *phba = vport->phba; 2812 struct lpfc_hba *phba = vport->phba;
2773 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2813 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2774 struct lpfc_nodelist *ndlp = rdata->pnode; 2814 struct lpfc_nodelist *ndlp;
2775 struct lpfc_scsi_buf *lpfc_cmd; 2815 struct lpfc_scsi_buf *lpfc_cmd;
2776 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 2816 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2777 int err; 2817 int err;
@@ -2781,13 +2821,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2781 cmnd->result = err; 2821 cmnd->result = err;
2782 goto out_fail_command; 2822 goto out_fail_command;
2783 } 2823 }
2824 ndlp = rdata->pnode;
2784 2825
2785 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 2826 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2786 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 2827 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2787 2828
2788 printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x " 2829 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2789 "str=%s without registering for BlockGuard - " 2830 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
2790 "Rejecting command\n", 2831 " op:%02x str=%s without registering for"
2832 " BlockGuard - Rejecting command\n",
2791 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 2833 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2792 dif_op_str[scsi_get_prot_op(cmnd)]); 2834 dif_op_str[scsi_get_prot_op(cmnd)]);
2793 goto out_fail_command; 2835 goto out_fail_command;
@@ -2827,61 +2869,66 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2827 cmnd->scsi_done = done; 2869 cmnd->scsi_done = done;
2828 2870
2829 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 2871 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2830 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2872 if (vport->phba->cfg_enable_bg) {
2873 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2831 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " 2874 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2832 "str=%s\n", 2875 "str=%s\n",
2833 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 2876 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2834 dif_op_str[scsi_get_prot_op(cmnd)]); 2877 dif_op_str[scsi_get_prot_op(cmnd)]);
2835 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2878 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2836 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " 2879 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2837 "%02x %02x %02x %02x %02x\n", 2880 "%02x %02x %02x %02x %02x\n",
2838 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], 2881 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2839 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], 2882 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2840 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], 2883 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2841 cmnd->cmnd[9]); 2884 cmnd->cmnd[9]);
2842 if (cmnd->cmnd[0] == READ_10) 2885 if (cmnd->cmnd[0] == READ_10)
2843 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2886 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2844 "9035 BLKGRD: READ @ sector %llu, " 2887 "9035 BLKGRD: READ @ sector %llu, "
2845 "count %u\n", 2888 "count %u\n",
2846 (unsigned long long)scsi_get_lba(cmnd), 2889 (unsigned long long)scsi_get_lba(cmnd),
2847 blk_rq_sectors(cmnd->request)); 2890 blk_rq_sectors(cmnd->request));
2848 else if (cmnd->cmnd[0] == WRITE_10) 2891 else if (cmnd->cmnd[0] == WRITE_10)
2849 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2892 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2850 "9036 BLKGRD: WRITE @ sector %llu, " 2893 "9036 BLKGRD: WRITE @ sector %llu, "
2851 "count %u cmd=%p\n", 2894 "count %u cmd=%p\n",
2852 (unsigned long long)scsi_get_lba(cmnd), 2895 (unsigned long long)scsi_get_lba(cmnd),
2853 blk_rq_sectors(cmnd->request), 2896 blk_rq_sectors(cmnd->request),
2854 cmnd); 2897 cmnd);
2898 }
2855 2899
2856 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 2900 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2857 } else { 2901 } else {
2858 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2902 if (vport->phba->cfg_enable_bg) {
2859 "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2860 " str=%s\n",
2861 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2862 dif_op_str[scsi_get_prot_op(cmnd)]);
2863 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2864 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2865 "%02x %02x %02x %02x %02x\n",
2866 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2867 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2868 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2869 cmnd->cmnd[9]);
2870 if (cmnd->cmnd[0] == READ_10)
2871 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2903 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2872 "9040 dbg: READ @ sector %llu, " 2904 "9038 BLKGRD: rcvd unprotected cmd:"
2873 "count %u\n", 2905 "%02x op:%02x str=%s\n",
2874 (unsigned long long)scsi_get_lba(cmnd), 2906 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2907 dif_op_str[scsi_get_prot_op(cmnd)]);
2908 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2909 "9039 BLKGRD: CDB: %02x %02x %02x "
2910 "%02x %02x %02x %02x %02x %02x %02x\n",
2911 cmnd->cmnd[0], cmnd->cmnd[1],
2912 cmnd->cmnd[2], cmnd->cmnd[3],
2913 cmnd->cmnd[4], cmnd->cmnd[5],
2914 cmnd->cmnd[6], cmnd->cmnd[7],
2915 cmnd->cmnd[8], cmnd->cmnd[9]);
2916 if (cmnd->cmnd[0] == READ_10)
2917 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2918 "9040 dbg: READ @ sector %llu, "
2919 "count %u\n",
2920 (unsigned long long)scsi_get_lba(cmnd),
2875 blk_rq_sectors(cmnd->request)); 2921 blk_rq_sectors(cmnd->request));
2876 else if (cmnd->cmnd[0] == WRITE_10) 2922 else if (cmnd->cmnd[0] == WRITE_10)
2877 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2923 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2878 "9041 dbg: WRITE @ sector %llu, " 2924 "9041 dbg: WRITE @ sector %llu, "
2879 "count %u cmd=%p\n", 2925 "count %u cmd=%p\n",
2880 (unsigned long long)scsi_get_lba(cmnd), 2926 (unsigned long long)scsi_get_lba(cmnd),
2881 blk_rq_sectors(cmnd->request), cmnd); 2927 blk_rq_sectors(cmnd->request), cmnd);
2882 else 2928 else
2883 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2929 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2884 "9042 dbg: parser not implemented\n"); 2930 "9042 dbg: parser not implemented\n");
2931 }
2885 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 2932 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2886 } 2933 }
2887 2934
@@ -2898,7 +2945,11 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2898 goto out_host_busy_free_buf; 2945 goto out_host_busy_free_buf;
2899 } 2946 }
2900 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2947 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2901 lpfc_sli_poll_fcp_ring(phba); 2948 spin_unlock(shost->host_lock);
2949 lpfc_sli_handle_fast_ring_event(phba,
2950 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
2951
2952 spin_lock(shost->host_lock);
2902 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 2953 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2903 lpfc_poll_rearm_timer(phba); 2954 lpfc_poll_rearm_timer(phba);
2904 } 2955 }
@@ -2917,28 +2968,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2917} 2968}
2918 2969
2919/** 2970/**
2920 * lpfc_block_error_handler - Routine to block error handler
2921 * @cmnd: Pointer to scsi_cmnd data structure.
2922 *
2923 * This routine blocks execution until the fc_rport state is no longer FC_PORTSTATE_BLOCKED.
2924 **/
2925static void
2926lpfc_block_error_handler(struct scsi_cmnd *cmnd)
2927{
2928 struct Scsi_Host *shost = cmnd->device->host;
2929 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2930
2931 spin_lock_irq(shost->host_lock);
2932 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
2933 spin_unlock_irq(shost->host_lock);
2934 msleep(1000);
2935 spin_lock_irq(shost->host_lock);
2936 }
2937 spin_unlock_irq(shost->host_lock);
2938 return;
2939}
2940
2941/**
2942 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 2971 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
2943 * @cmnd: Pointer to scsi_cmnd data structure. 2972 * @cmnd: Pointer to scsi_cmnd data structure.
2944 * 2973 *
@@ -2961,7 +2990,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2961 int ret = SUCCESS; 2990 int ret = SUCCESS;
2962 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 2991 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
2963 2992
2964 lpfc_block_error_handler(cmnd); 2993 fc_block_scsi_eh(cmnd);
2965 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 2994 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
2966 BUG_ON(!lpfc_cmd); 2995 BUG_ON(!lpfc_cmd);
2967 2996
@@ -3001,6 +3030,11 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3001 3030
3002 icmd->ulpLe = 1; 3031 icmd->ulpLe = 1;
3003 icmd->ulpClass = cmd->ulpClass; 3032 icmd->ulpClass = cmd->ulpClass;
3033
3034 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3035 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
3036 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
3037
3004 if (lpfc_is_link_up(phba)) 3038 if (lpfc_is_link_up(phba))
3005 icmd->ulpCommand = CMD_ABORT_XRI_CN; 3039 icmd->ulpCommand = CMD_ABORT_XRI_CN;
3006 else 3040 else
@@ -3016,7 +3050,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3016 } 3050 }
3017 3051
3018 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3052 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3019 lpfc_sli_poll_fcp_ring (phba); 3053 lpfc_sli_handle_fast_ring_event(phba,
3054 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3020 3055
3021 lpfc_cmd->waitq = &waitq; 3056 lpfc_cmd->waitq = &waitq;
3022 /* Wait for abort to complete */ 3057 /* Wait for abort to complete */
@@ -3166,9 +3201,15 @@ static int
3166lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 3201lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3167{ 3202{
3168 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3203 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3169 struct lpfc_nodelist *pnode = rdata->pnode; 3204 struct lpfc_nodelist *pnode;
3170 unsigned long later; 3205 unsigned long later;
3171 3206
3207 if (!rdata) {
3208 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3209 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
3210 return FAILED;
3211 }
3212 pnode = rdata->pnode;
3172 /* 3213 /*
3173 * If target is not in a MAPPED state, delay until 3214 * If target is not in a MAPPED state, delay until
3174 * target is rediscovered or devloss timeout expires. 3215 * target is rediscovered or devloss timeout expires.
@@ -3253,13 +3294,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3253 struct Scsi_Host *shost = cmnd->device->host; 3294 struct Scsi_Host *shost = cmnd->device->host;
3254 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3295 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3255 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3296 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3256 struct lpfc_nodelist *pnode = rdata->pnode; 3297 struct lpfc_nodelist *pnode;
3257 unsigned tgt_id = cmnd->device->id; 3298 unsigned tgt_id = cmnd->device->id;
3258 unsigned int lun_id = cmnd->device->lun; 3299 unsigned int lun_id = cmnd->device->lun;
3259 struct lpfc_scsi_event_header scsi_event; 3300 struct lpfc_scsi_event_header scsi_event;
3260 int status; 3301 int status;
3261 3302
3262 lpfc_block_error_handler(cmnd); 3303 if (!rdata) {
3304 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3305 "0798 Device Reset rport failure: rdata x%p\n", rdata);
3306 return FAILED;
3307 }
3308 pnode = rdata->pnode;
3309 fc_block_scsi_eh(cmnd);
3263 3310
3264 status = lpfc_chk_tgt_mapped(vport, cmnd); 3311 status = lpfc_chk_tgt_mapped(vport, cmnd);
3265 if (status == FAILED) { 3312 if (status == FAILED) {
@@ -3312,13 +3359,19 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3312 struct Scsi_Host *shost = cmnd->device->host; 3359 struct Scsi_Host *shost = cmnd->device->host;
3313 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3360 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3314 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3361 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3315 struct lpfc_nodelist *pnode = rdata->pnode; 3362 struct lpfc_nodelist *pnode;
3316 unsigned tgt_id = cmnd->device->id; 3363 unsigned tgt_id = cmnd->device->id;
3317 unsigned int lun_id = cmnd->device->lun; 3364 unsigned int lun_id = cmnd->device->lun;
3318 struct lpfc_scsi_event_header scsi_event; 3365 struct lpfc_scsi_event_header scsi_event;
3319 int status; 3366 int status;
3320 3367
3321 lpfc_block_error_handler(cmnd); 3368 if (!rdata) {
3369 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3370 "0799 Target Reset rport failure: rdata x%p\n", rdata);
3371 return FAILED;
3372 }
3373 pnode = rdata->pnode;
3374 fc_block_scsi_eh(cmnd);
3322 3375
3323 status = lpfc_chk_tgt_mapped(vport, cmnd); 3376 status = lpfc_chk_tgt_mapped(vport, cmnd);
3324 if (status == FAILED) { 3377 if (status == FAILED) {
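
Each EH entry point now guards against a NULL rdata before dereferencing it: cmnd->device->hostdata can be torn down by devloss while error handling is still queued, and the old code took pnode from it unconditionally. The pattern the patch repeats, in reduced form:

/* Sketch: rport private data may vanish on devloss; fail the EH
 * request instead of dereferencing a stale pointer. */
struct lpfc_rport_data *rdata = cmnd->device->hostdata;

if (!rdata)
	return FAILED;
pnode = rdata->pnode;
fc_block_scsi_eh(cmnd);
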
@@ -3384,7 +3437,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3384 fc_host_post_vendor_event(shost, fc_get_event_number(), 3437 fc_host_post_vendor_event(shost, fc_get_event_number(),
3385 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 3438 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3386 3439
3387 lpfc_block_error_handler(cmnd); 3440 fc_block_scsi_eh(cmnd);
3388 3441
3389 /* 3442 /*
3390 * Since the driver manages a single bus device, reset all 3443 * Since the driver manages a single bus device, reset all
@@ -3498,6 +3551,8 @@ lpfc_slave_alloc(struct scsi_device *sdev)
3498 "Allocated %d buffers.\n", 3551 "Allocated %d buffers.\n",
3499 num_to_alloc, num_allocated); 3552 num_to_alloc, num_allocated);
3500 } 3553 }
3554 if (num_allocated > 0)
3555 phba->total_scsi_bufs += num_allocated;
3501 return 0; 3556 return 0;
3502} 3557}
3503 3558
@@ -3534,7 +3589,8 @@ lpfc_slave_configure(struct scsi_device *sdev)
3534 rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3589 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3535 3590
3536 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3591 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3537 lpfc_sli_poll_fcp_ring(phba); 3592 lpfc_sli_handle_fast_ring_event(phba,
3593 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3538 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3594 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3539 lpfc_poll_rearm_timer(phba); 3595 lpfc_poll_rearm_timer(phba);
3540 } 3596 }
@@ -3576,6 +3632,7 @@ struct scsi_host_template lpfc_template = {
3576 .shost_attrs = lpfc_hba_attrs, 3632 .shost_attrs = lpfc_hba_attrs,
3577 .max_sectors = 0xFFFF, 3633 .max_sectors = 0xFFFF,
3578 .vendor_id = LPFC_NL_VENDOR_ID, 3634 .vendor_id = LPFC_NL_VENDOR_ID,
3635 .change_queue_depth = lpfc_change_queue_depth,
3579}; 3636};
3580 3637
3581struct scsi_host_template lpfc_vport_template = { 3638struct scsi_host_template lpfc_vport_template = {
@@ -3597,4 +3654,5 @@ struct scsi_host_template lpfc_vport_template = {
3597 .use_clustering = ENABLE_CLUSTERING, 3654 .use_clustering = ENABLE_CLUSTERING,
3598 .shost_attrs = lpfc_vport_attrs, 3655 .shost_attrs = lpfc_vport_attrs,
3599 .max_sectors = 0xFFFF, 3656 .max_sectors = 0xFFFF,
3657 .change_queue_depth = lpfc_change_queue_depth,
3600}; 3658};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 65dfc8bd5b49..5932273870a5 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -118,6 +118,7 @@ struct lpfc_scsi_buf {
118 118
119 uint32_t timeout; 119 uint32_t timeout;
120 120
121 uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
121 uint16_t status; /* From IOCB Word 7- ulpStatus */ 122 uint16_t status; /* From IOCB Word 7- ulpStatus */
122 uint32_t result; /* From IOCB Word 4. */ 123 uint32_t result; /* From IOCB Word 4. */
123 124
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 43cbe336f1f8..049fb9a17b3f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/slab.h>
26 27
27#include <scsi/scsi.h> 28#include <scsi/scsi.h>
28#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
@@ -30,6 +31,7 @@
30#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 32#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h> 33#include <scsi/fc/fc_fs.h>
34#include <linux/aer.h>
33 35
34#include "lpfc_hw4.h" 36#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 37#include "lpfc_hw.h"
@@ -58,8 +60,11 @@ typedef enum _lpfc_iocb_type {
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, 60static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t); 61 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, 62static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *); 63 uint8_t *, uint32_t *);
62 64static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
65 struct lpfc_iocbq *);
66static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67 struct hbq_dmabuf *);
63static IOCB_t * 68static IOCB_t *
64lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 69lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
65{ 70{
@@ -259,6 +264,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
259 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 264 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
260 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); 265 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
261 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 266 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
267 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
268 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
269 readl(q->phba->sli4_hba.EQCQDBregaddr);
262 return released; 270 return released;
263} 271}
264 272
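
The added readl() is the classic posted-write flush: PCI writes can linger in bridges, and for level-triggered INTx a re-arm that has not reached the chip can leave the interrupt line stuck. Reading back the same register forces the write out before the ISR can race the re-armed queue; a reduced sketch:

/* Sketch: flush the doorbell write on INTx re-arm. The read result
 * is discarded; only its ordering side effect matters. */
writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
if (q->phba->intr_type == INTx && arm == LPFC_QUEUE_REARM)
	(void) readl(q->phba->sli4_hba.EQCQDBregaddr);
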
@@ -487,7 +495,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
487 * 495 *
 488 * Returns sglq pointer = success, NULL = Failure. 496 * Returns sglq pointer = success, NULL = Failure.
489 **/ 497 **/
490static struct lpfc_sglq * 498struct lpfc_sglq *
491__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 499__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
492{ 500{
493 uint16_t adj_xri; 501 uint16_t adj_xri;
@@ -515,8 +523,11 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
515 struct lpfc_sglq *sglq = NULL; 523 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri; 524 uint16_t adj_xri;
517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); 525 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
526 if (!sglq)
527 return NULL;
518 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; 528 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 529 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
530 sglq->state = SGL_ALLOCATED;
520 return sglq; 531 return sglq;
521} 532}
522 533
@@ -571,18 +582,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
571 else 582 else
572 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 583 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
573 if (sglq) { 584 if (sglq) {
574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED 585 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
575 || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 586 (sglq->state != SGL_XRI_ABORTED)) {
576 && (iocbq->iocb.un.ulpWord[4]
577 == IOERR_SLI_ABORTED))) {
578 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 587 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
579 iflag); 588 iflag);
580 list_add(&sglq->list, 589 list_add(&sglq->list,
581 &phba->sli4_hba.lpfc_abts_els_sgl_list); 590 &phba->sli4_hba.lpfc_abts_els_sgl_list);
582 spin_unlock_irqrestore( 591 spin_unlock_irqrestore(
583 &phba->sli4_hba.abts_sgl_list_lock, iflag); 592 &phba->sli4_hba.abts_sgl_list_lock, iflag);
584 } else 593 } else {
594 sglq->state = SGL_FREED;
585 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 595 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
596 }
586 } 597 }
587 598
588 599
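
Taken together, the last two hunks replace the old status-word test with an explicit SGL state machine: a freshly allocated sglq is marked SGL_ALLOCATED, and on iocb release it either returns to the free list as SGL_FREED or, while the HBA still owns the exchange (LPFC_EXCHANGE_BUSY set and no XRI_ABORTED event seen yet), is parked on the aborted-SGL list. A simplified sketch of that decision, with illustrative types rather than the driver's:

/* Sketch only: state names mirror the driver, list handling omitted. */
enum sgl_state { SGL_FREED, SGL_ALLOCATED, SGL_XRI_ABORTED };

struct sglq_model {
        enum sgl_state state;
        int exchange_busy;      /* mirrors LPFC_EXCHANGE_BUSY on the iocb */
};

/* Returns 1 when the XRI may be recycled at once, 0 when it must be
 * parked on the abts list until the XRI_ABORTED event arrives. */
static int sglq_can_recycle(const struct sglq_model *sglq)
{
        if (sglq->exchange_busy && sglq->state != SGL_XRI_ABORTED)
                return 0;
        return 1;
}
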
@@ -755,10 +766,6 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
755 case DSSCMD_IWRITE64_CX: 766 case DSSCMD_IWRITE64_CX:
756 case DSSCMD_IREAD64_CR: 767 case DSSCMD_IREAD64_CR:
757 case DSSCMD_IREAD64_CX: 768 case DSSCMD_IREAD64_CX:
758 case DSSCMD_INVALIDATE_DEK:
759 case DSSCMD_SET_KEK:
760 case DSSCMD_GET_KEK_ID:
761 case DSSCMD_GEN_XFER:
762 type = LPFC_SOL_IOCB; 769 type = LPFC_SOL_IOCB;
763 break; 770 break;
764 case CMD_ABORT_XRI_CN: 771 case CMD_ABORT_XRI_CN:
@@ -767,6 +774,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
767 case CMD_CLOSE_XRI_CX: 774 case CMD_CLOSE_XRI_CX:
768 case CMD_XRI_ABORTED_CX: 775 case CMD_XRI_ABORTED_CX:
769 case CMD_ABORT_MXRI64_CN: 776 case CMD_ABORT_MXRI64_CN:
777 case CMD_XMIT_BLS_RSP64_CX:
770 type = LPFC_ABORT_IOCB; 778 type = LPFC_ABORT_IOCB;
771 break; 779 break;
772 case CMD_RCV_SEQUENCE_CX: 780 case CMD_RCV_SEQUENCE_CX:
@@ -1373,7 +1381,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1373/* HBQ for ELS and CT traffic. */ 1381/* HBQ for ELS and CT traffic. */
1374static struct lpfc_hbq_init lpfc_els_hbq = { 1382static struct lpfc_hbq_init lpfc_els_hbq = {
1375 .rn = 1, 1383 .rn = 1,
1376 .entry_count = 200, 1384 .entry_count = 256,
1377 .mask_count = 0, 1385 .mask_count = 0,
1378 .profile = 0, 1386 .profile = 0,
1379 .ring_mask = (1 << LPFC_ELS_RING), 1387 .ring_mask = (1 << LPFC_ELS_RING),
@@ -1472,8 +1480,11 @@ err:
1472int 1480int
1473lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1481lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1474{ 1482{
1475 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1483 if (phba->sli_rev == LPFC_SLI_REV4)
1476 lpfc_hbq_defs[qno]->add_count)); 1484 return 0;
1485 else
1486 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1487 lpfc_hbq_defs[qno]->add_count);
1477} 1488}
1478 1489
1479/** 1490/**
@@ -1488,8 +1499,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1488static int 1499static int
1489lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1500lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1490{ 1501{
1491 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1502 if (phba->sli_rev == LPFC_SLI_REV4)
1492 lpfc_hbq_defs[qno]->init_count)); 1503 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1504 lpfc_hbq_defs[qno]->entry_count);
1505 else
1506 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1507 lpfc_hbq_defs[qno]->init_count);
1493} 1508}
1494 1509
1495/** 1510/**
@@ -1700,6 +1715,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1700 struct lpfc_dmabuf *mp; 1715 struct lpfc_dmabuf *mp;
1701 uint16_t rpi, vpi; 1716 uint16_t rpi, vpi;
1702 int rc; 1717 int rc;
1718 struct lpfc_vport *vport = pmb->vport;
1703 1719
1704 mp = (struct lpfc_dmabuf *) (pmb->context1); 1720 mp = (struct lpfc_dmabuf *) (pmb->context1);
1705 1721
@@ -1728,6 +1744,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1728 return; 1744 return;
1729 } 1745 }
1730 1746
1747 /* Unreg VPI, if the REG_VPI succeed after VLink failure */
1748 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
1749 !(phba->pport->load_flag & FC_UNLOADING) &&
1750 !pmb->u.mb.mbxStatus) {
1751 lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
1752 pmb->vport = vport;
1753 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1754 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1755 if (rc != MBX_NOT_FINISHED)
1756 return;
1757 }
1758
1731 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 1759 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1732 lpfc_sli4_mbox_cmd_free(phba, pmb); 1760 lpfc_sli4_mbox_cmd_free(phba, pmb);
1733 else 1761 else
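
The new block recycles the just-completed mailbox object as an UNREG_VPI command instead of freeing it, re-attaching the vport and the same completion handler; the mailbox is only released when the re-issue fails. A hypothetical, much-simplified model of that pattern (the real code rewrites pmb in place via lpfc_unreg_vpi() and lpfc_sli_issue_mbox()):

#include <stdlib.h>

struct mboxq;
typedef void (*mbox_cmpl_t)(struct mboxq *);

struct mboxq {
        mbox_cmpl_t mbox_cmpl;
        void *vport;
};

#define MBX_NOT_FINISHED (-1)

static int issue_mbox_nowait(struct mboxq *mb)  /* stand-in issue path */
{
        (void)mb;
        return 0;               /* pretend the command was queued */
}

static void recycle_as_unreg_vpi(struct mboxq *mb, void *vport,
                                 mbox_cmpl_t cmpl)
{
        /* ... rewrite the mailbox payload as UNREG_VPI here ... */
        mb->vport = vport;      /* re-attach the owning vport */
        mb->mbox_cmpl = cmpl;   /* same handler completes the new command */
        if (issue_mbox_nowait(mb) != MBX_NOT_FINISHED)
                return;         /* mailbox now owned by the new command */
        free(mb);               /* freed only when the re-issue failed */
}
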
@@ -1794,7 +1822,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1794 */ 1822 */
1795 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 1823 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
1796 MBX_SHUTDOWN) { 1824 MBX_SHUTDOWN) {
1797 /* Unknow mailbox command compl */ 1825 /* Unknown mailbox command compl */
1798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1826 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1799 "(%d):0323 Unknown Mailbox command " 1827 "(%d):0323 Unknown Mailbox command "
1800 "x%x (x%x) Cmpl\n", 1828 "x%x (x%x) Cmpl\n",
@@ -2068,8 +2096,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2068 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2096 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2069 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2097 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2070 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2098 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2071 Rctl = FC_ELS_REQ; 2099 Rctl = FC_RCTL_ELS_REQ;
2072 Type = FC_ELS_DATA; 2100 Type = FC_TYPE_ELS;
2073 } else { 2101 } else {
2074 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2102 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2075 Rctl = w5p->hcsw.Rctl; 2103 Rctl = w5p->hcsw.Rctl;
@@ -2079,8 +2107,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2079 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2107 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2080 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2108 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2081 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2109 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2082 Rctl = FC_ELS_REQ; 2110 Rctl = FC_RCTL_ELS_REQ;
2083 Type = FC_ELS_DATA; 2111 Type = FC_TYPE_ELS;
2084 w5p->hcsw.Rctl = Rctl; 2112 w5p->hcsw.Rctl = Rctl;
2085 w5p->hcsw.Type = Type; 2113 w5p->hcsw.Type = Type;
2086 } 2114 }
@@ -2211,9 +2239,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2211 * All other are passed to the completion callback. 2239 * All other are passed to the completion callback.
2212 */ 2240 */
2213 if (pring->ringno == LPFC_ELS_RING) { 2241 if (pring->ringno == LPFC_ELS_RING) {
2214 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) { 2242 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2243 (cmdiocbp->iocb_flag &
2244 LPFC_DRIVER_ABORTED)) {
2245 spin_lock_irqsave(&phba->hbalock,
2246 iflag);
2215 cmdiocbp->iocb_flag &= 2247 cmdiocbp->iocb_flag &=
2216 ~LPFC_DRIVER_ABORTED; 2248 ~LPFC_DRIVER_ABORTED;
2249 spin_unlock_irqrestore(&phba->hbalock,
2250 iflag);
2217 saveq->iocb.ulpStatus = 2251 saveq->iocb.ulpStatus =
2218 IOSTAT_LOCAL_REJECT; 2252 IOSTAT_LOCAL_REJECT;
2219 saveq->iocb.un.ulpWord[4] = 2253 saveq->iocb.un.ulpWord[4] =
@@ -2223,7 +2257,62 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2223 * of DMAing payload, so don't free data 2257 * of DMAing payload, so don't free data
2224 * buffer till after a hbeat. 2258 * buffer till after a hbeat.
2225 */ 2259 */
2260 spin_lock_irqsave(&phba->hbalock,
2261 iflag);
2226 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2262 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2263 spin_unlock_irqrestore(&phba->hbalock,
2264 iflag);
2265 }
2266 if (phba->sli_rev == LPFC_SLI_REV4) {
2267 if (saveq->iocb_flag &
2268 LPFC_EXCHANGE_BUSY) {
2269 /* Set cmdiocb flag for the
2270 * exchange busy so sgl (xri)
2271 * will not be released until
2272 * the abort xri is received
2273 * from hba.
2274 */
2275 spin_lock_irqsave(
2276 &phba->hbalock, iflag);
2277 cmdiocbp->iocb_flag |=
2278 LPFC_EXCHANGE_BUSY;
2279 spin_unlock_irqrestore(
2280 &phba->hbalock, iflag);
2281 }
2282 if (cmdiocbp->iocb_flag &
2283 LPFC_DRIVER_ABORTED) {
2284 /*
2285 * Clear LPFC_DRIVER_ABORTED
2286 * bit in case it was driver
2287 * initiated abort.
2288 */
2289 spin_lock_irqsave(
2290 &phba->hbalock, iflag);
2291 cmdiocbp->iocb_flag &=
2292 ~LPFC_DRIVER_ABORTED;
2293 spin_unlock_irqrestore(
2294 &phba->hbalock, iflag);
2295 cmdiocbp->iocb.ulpStatus =
2296 IOSTAT_LOCAL_REJECT;
2297 cmdiocbp->iocb.un.ulpWord[4] =
2298 IOERR_ABORT_REQUESTED;
2299 /*
 2300 * For SLI4, irspiocb contains
 2301 * NO_XRI in sli_xritag, so it
 2302 * does not affect the sgl
 2303 * (xri) release process.
2304 */
2305 saveq->iocb.ulpStatus =
2306 IOSTAT_LOCAL_REJECT;
2307 saveq->iocb.un.ulpWord[4] =
2308 IOERR_SLI_ABORTED;
2309 spin_lock_irqsave(
2310 &phba->hbalock, iflag);
2311 saveq->iocb_flag |=
2312 LPFC_DELAY_MEM_FREE;
2313 spin_unlock_irqrestore(
2314 &phba->hbalock, iflag);
2315 }
2227 } 2316 }
2228 } 2317 }
2229 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2318 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
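
Every iocb_flag update above is now bracketed by hbalock: the flag word is a shared bitmask touched from both interrupt and process context, so each read-modify-write must happen under the same lock. A small userspace model of the discipline, with a pthread mutex standing in for the spinlock:

#include <pthread.h>

#define DRIVER_ABORTED  (1u << 0)   /* illustrative bit names */
#define EXCHANGE_BUSY   (1u << 1)
#define DELAY_MEM_FREE  (1u << 2)

struct iocb_model {
        unsigned int flags;
};

static pthread_mutex_t lock_model = PTHREAD_MUTEX_INITIALIZER;

static void iocb_set_flag(struct iocb_model *io, unsigned int bit)
{
        pthread_mutex_lock(&lock_model);
        io->flags |= bit;       /* RMW is safe only while holding the lock */
        pthread_mutex_unlock(&lock_model);
}

static void iocb_clear_flag(struct iocb_model *io, unsigned int bit)
{
        pthread_mutex_lock(&lock_model);
        io->flags &= ~bit;
        pthread_mutex_unlock(&lock_model);
}
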
@@ -2324,168 +2413,6 @@ void lpfc_poll_eratt(unsigned long ptr)
2324 return; 2413 return;
2325} 2414}
2326 2415
2327/**
2328 * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode
2329 * @phba: Pointer to HBA context object.
2330 *
2331 * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
2332 * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
2333 * is enabled.
2334 *
2335 * The caller does not hold any lock.
2336 * The function processes each response iocb in the response ring until it
2337 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2338 * LE bit set. The function will call the completion handler of the command iocb
2339 * if the response iocb indicates a completion for a command iocb or it is
2340 * an abort completion.
2341 **/
2342void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
2343{
2344 struct lpfc_sli *psli = &phba->sli;
2345 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
2346 IOCB_t *irsp = NULL;
2347 IOCB_t *entry = NULL;
2348 struct lpfc_iocbq *cmdiocbq = NULL;
2349 struct lpfc_iocbq rspiocbq;
2350 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2351 uint32_t status;
2352 uint32_t portRspPut, portRspMax;
2353 int type;
2354 uint32_t rsp_cmpl = 0;
2355 uint32_t ha_copy;
2356 unsigned long iflags;
2357
2358 pring->stats.iocb_event++;
2359
2360 /*
2361 * The next available response entry should never exceed the maximum
2362 * entries. If it does, treat it as an adapter hardware error.
2363 */
2364 portRspMax = pring->numRiocb;
2365 portRspPut = le32_to_cpu(pgp->rspPutInx);
2366 if (unlikely(portRspPut >= portRspMax)) {
2367 lpfc_sli_rsp_pointers_error(phba, pring);
2368 return;
2369 }
2370
2371 rmb();
2372 while (pring->rspidx != portRspPut) {
2373 entry = lpfc_resp_iocb(phba, pring);
2374 if (++pring->rspidx >= portRspMax)
2375 pring->rspidx = 0;
2376
2377 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2378 (uint32_t *) &rspiocbq.iocb,
2379 phba->iocb_rsp_size);
2380 irsp = &rspiocbq.iocb;
2381 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2382 pring->stats.iocb_rsp++;
2383 rsp_cmpl++;
2384
2385 if (unlikely(irsp->ulpStatus)) {
2386 /* Rsp ring <ringno> error: IOCB */
2387 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2388 "0326 Rsp Ring %d error: IOCB Data: "
2389 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2390 pring->ringno,
2391 irsp->un.ulpWord[0],
2392 irsp->un.ulpWord[1],
2393 irsp->un.ulpWord[2],
2394 irsp->un.ulpWord[3],
2395 irsp->un.ulpWord[4],
2396 irsp->un.ulpWord[5],
2397 *(uint32_t *)&irsp->un1,
2398 *((uint32_t *)&irsp->un1 + 1));
2399 }
2400
2401 switch (type) {
2402 case LPFC_ABORT_IOCB:
2403 case LPFC_SOL_IOCB:
2404 /*
2405 * Idle exchange closed via ABTS from port. No iocb
2406 * resources need to be recovered.
2407 */
2408 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2409 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2410 "0314 IOCB cmd 0x%x "
2411 "processed. Skipping "
2412 "completion",
2413 irsp->ulpCommand);
2414 break;
2415 }
2416
2417 spin_lock_irqsave(&phba->hbalock, iflags);
2418 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2419 &rspiocbq);
2420 spin_unlock_irqrestore(&phba->hbalock, iflags);
2421 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
2422 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2423 &rspiocbq);
2424 }
2425 break;
2426 default:
2427 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2428 char adaptermsg[LPFC_MAX_ADPTMSG];
2429 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2430 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2431 MAX_MSG_DATA);
2432 dev_warn(&((phba->pcidev)->dev),
2433 "lpfc%d: %s\n",
2434 phba->brd_no, adaptermsg);
2435 } else {
2436 /* Unknown IOCB command */
2437 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2438 "0321 Unknown IOCB command "
2439 "Data: x%x, x%x x%x x%x x%x\n",
2440 type, irsp->ulpCommand,
2441 irsp->ulpStatus,
2442 irsp->ulpIoTag,
2443 irsp->ulpContext);
2444 }
2445 break;
2446 }
2447
2448 /*
2449 * The response IOCB has been processed. Update the ring
2450 * pointer in SLIM. If the port response put pointer has not
2451 * been updated, sync the pgp->rspPutInx and fetch the new port
2452 * response put pointer.
2453 */
2454 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2455
2456 if (pring->rspidx == portRspPut)
2457 portRspPut = le32_to_cpu(pgp->rspPutInx);
2458 }
2459
2460 ha_copy = readl(phba->HAregaddr);
2461 ha_copy >>= (LPFC_FCP_RING * 4);
2462
2463 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
2464 spin_lock_irqsave(&phba->hbalock, iflags);
2465 pring->stats.iocb_rsp_full++;
2466 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
2467 writel(status, phba->CAregaddr);
2468 readl(phba->CAregaddr);
2469 spin_unlock_irqrestore(&phba->hbalock, iflags);
2470 }
2471 if ((ha_copy & HA_R0CE_RSP) &&
2472 (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2473 spin_lock_irqsave(&phba->hbalock, iflags);
2474 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2475 pring->stats.iocb_cmd_empty++;
2476
2477 /* Force update of the local copy of cmdGetInx */
2478 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2479 lpfc_sli_resume_iocb(phba, pring);
2480
2481 if ((pring->lpfc_sli_cmd_available))
2482 (pring->lpfc_sli_cmd_available) (phba, pring);
2483
2484 spin_unlock_irqrestore(&phba->hbalock, iflags);
2485 }
2486
2487 return;
2488}
2489 2416
2490/** 2417/**
2491 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2418 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
@@ -2502,9 +2429,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
2502 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2429 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2503 * function if this is an unsolicited iocb. 2430 * function if this is an unsolicited iocb.
2504 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2431 * This routine presumes LPFC_FCP_RING handling and doesn't bother
2505 * to check it explicitly. This function always returns 1. 2432 * to check it explicitly.
2506 **/ 2433 */
2507static int 2434int
2508lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2435lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2509 struct lpfc_sli_ring *pring, uint32_t mask) 2436 struct lpfc_sli_ring *pring, uint32_t mask)
2510{ 2437{
@@ -2534,6 +2461,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2534 spin_unlock_irqrestore(&phba->hbalock, iflag); 2461 spin_unlock_irqrestore(&phba->hbalock, iflag);
2535 return 1; 2462 return 1;
2536 } 2463 }
2464 if (phba->fcp_ring_in_use) {
2465 spin_unlock_irqrestore(&phba->hbalock, iflag);
2466 return 1;
2467 } else
2468 phba->fcp_ring_in_use = 1;
2537 2469
2538 rmb(); 2470 rmb();
2539 while (pring->rspidx != portRspPut) { 2471 while (pring->rspidx != portRspPut) {
@@ -2603,18 +2535,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2603 2535
2604 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2536 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2605 &rspiocbq); 2537 &rspiocbq);
2606 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 2538 if (unlikely(!cmdiocbq))
2607 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2539 break;
2608 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2540 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
2609 &rspiocbq); 2541 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2610 } else { 2542 if (cmdiocbq->iocb_cmpl) {
2611 spin_unlock_irqrestore(&phba->hbalock, 2543 spin_unlock_irqrestore(&phba->hbalock, iflag);
2612 iflag); 2544 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2613 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2545 &rspiocbq);
2614 &rspiocbq); 2546 spin_lock_irqsave(&phba->hbalock, iflag);
2615 spin_lock_irqsave(&phba->hbalock,
2616 iflag);
2617 }
2618 } 2547 }
2619 break; 2548 break;
2620 case LPFC_UNSOL_IOCB: 2549 case LPFC_UNSOL_IOCB:
@@ -2675,6 +2604,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2675 2604
2676 } 2605 }
2677 2606
2607 phba->fcp_ring_in_use = 0;
2678 spin_unlock_irqrestore(&phba->hbalock, iflag); 2608 spin_unlock_irqrestore(&phba->hbalock, iflag);
2679 return rc; 2609 return rc;
2680} 2610}
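
The fcp_ring_in_use flag added above is a single-flight guard: only one CPU drains the FCP response ring at a time, and a late arrival returns immediately instead of queueing behind the whole loop. A compact model of the guard, again with a mutex in place of hbalock:

#include <pthread.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static int fcp_ring_in_use;

static int handle_fast_ring_event_model(void)
{
        pthread_mutex_lock(&ring_lock);
        if (fcp_ring_in_use) {
                pthread_mutex_unlock(&ring_lock);
                return 1;               /* another CPU is already draining */
        }
        fcp_ring_in_use = 1;
        pthread_mutex_unlock(&ring_lock);

        /* ... drain response entries, dropping the lock around each
         * completion callback, as the driver does ... */

        pthread_mutex_lock(&ring_lock);
        fcp_ring_in_use = 0;            /* let the next caller in */
        pthread_mutex_unlock(&ring_lock);
        return 0;
}
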
@@ -3018,16 +2948,39 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask) 2948 struct lpfc_sli_ring *pring, uint32_t mask)
3019{ 2949{
3020 struct lpfc_iocbq *irspiocbq; 2950 struct lpfc_iocbq *irspiocbq;
2951 struct hbq_dmabuf *dmabuf;
2952 struct lpfc_cq_event *cq_event;
3021 unsigned long iflag; 2953 unsigned long iflag;
3022 2954
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) { 2955 spin_lock_irqsave(&phba->hbalock, iflag);
2956 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
2957 spin_unlock_irqrestore(&phba->hbalock, iflag);
2958 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3024 /* Get the response iocb from the head of work queue */ 2959 /* Get the response iocb from the head of work queue */
3025 spin_lock_irqsave(&phba->hbalock, iflag); 2960 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue, 2961 list_remove_head(&phba->sli4_hba.sp_queue_event,
3027 irspiocbq, struct lpfc_iocbq, list); 2962 cq_event, struct lpfc_cq_event, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag); 2963 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029 /* Process the response iocb */ 2964
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); 2965 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
2966 case CQE_CODE_COMPL_WQE:
2967 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
2968 cq_event);
2969 /* Translate ELS WCQE to response IOCBQ */
2970 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
2971 irspiocbq);
2972 if (irspiocbq)
2973 lpfc_sli_sp_handle_rspiocb(phba, pring,
2974 irspiocbq);
2975 break;
2976 case CQE_CODE_RECEIVE:
2977 dmabuf = container_of(cq_event, struct hbq_dmabuf,
2978 cq_event);
2979 lpfc_sli4_handle_received_buffer(phba, dmabuf);
2980 break;
2981 default:
2982 break;
2983 }
3031 } 2984 }
3032} 2985}
3033 2986
@@ -3160,6 +3113,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3160 3113
3161 /* Check to see if any errors occurred during init */ 3114 /* Check to see if any errors occurred during init */
3162 if ((status & HS_FFERM) || (i >= 20)) { 3115 if ((status & HS_FFERM) || (i >= 20)) {
3116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3117 "2751 Adapter failed to restart, "
3118 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3119 status,
3120 readl(phba->MBslimaddr + 0xa8),
3121 readl(phba->MBslimaddr + 0xac));
3163 phba->link_state = LPFC_HBA_ERROR; 3122 phba->link_state = LPFC_HBA_ERROR;
3164 retval = 1; 3123 retval = 1;
3165 } 3124 }
@@ -3347,6 +3306,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
3347 if (retval != MBX_SUCCESS) { 3306 if (retval != MBX_SUCCESS) {
3348 if (retval != MBX_BUSY) 3307 if (retval != MBX_BUSY)
3349 mempool_free(pmb, phba->mbox_mem_pool); 3308 mempool_free(pmb, phba->mbox_mem_pool);
3309 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3310 "2752 KILL_BOARD command failed retval %d\n",
3311 retval);
3350 spin_lock_irq(&phba->hbalock); 3312 spin_lock_irq(&phba->hbalock);
3351 phba->link_flag &= ~LS_IGNORE_ERATT; 3313 phba->link_flag &= ~LS_IGNORE_ERATT;
3352 spin_unlock_irq(&phba->hbalock); 3314 spin_unlock_irq(&phba->hbalock);
@@ -3416,6 +3378,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
3416 3378
3417 /* perform board reset */ 3379 /* perform board reset */
3418 phba->fc_eventTag = 0; 3380 phba->fc_eventTag = 0;
3381 phba->link_events = 0;
3419 phba->pport->fc_myDID = 0; 3382 phba->pport->fc_myDID = 0;
3420 phba->pport->fc_prevDID = 0; 3383 phba->pport->fc_prevDID = 0;
3421 3384
@@ -3476,6 +3439,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3476 3439
3477 /* perform board reset */ 3440 /* perform board reset */
3478 phba->fc_eventTag = 0; 3441 phba->fc_eventTag = 0;
3442 phba->link_events = 0;
3479 phba->pport->fc_myDID = 0; 3443 phba->pport->fc_myDID = 0;
3480 phba->pport->fc_prevDID = 0; 3444 phba->pport->fc_prevDID = 0;
3481 3445
@@ -3495,7 +3459,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3495 list_del_init(&phba->sli4_hba.dat_rq->list); 3459 list_del_init(&phba->sli4_hba.dat_rq->list);
3496 list_del_init(&phba->sli4_hba.mbx_cq->list); 3460 list_del_init(&phba->sli4_hba.mbx_cq->list);
3497 list_del_init(&phba->sli4_hba.els_cq->list); 3461 list_del_init(&phba->sli4_hba.els_cq->list);
3498 list_del_init(&phba->sli4_hba.rxq_cq->list);
3499 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) 3462 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3500 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); 3463 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3501 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) 3464 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
@@ -3531,9 +3494,13 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3531 struct lpfc_sli *psli; 3494 struct lpfc_sli *psli;
3532 volatile uint32_t word0; 3495 volatile uint32_t word0;
3533 void __iomem *to_slim; 3496 void __iomem *to_slim;
3497 uint32_t hba_aer_enabled;
3534 3498
3535 spin_lock_irq(&phba->hbalock); 3499 spin_lock_irq(&phba->hbalock);
3536 3500
3501 /* Take PCIe device Advanced Error Reporting (AER) state */
3502 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3503
3537 psli = &phba->sli; 3504 psli = &phba->sli;
3538 3505
3539 /* Restart HBA */ 3506 /* Restart HBA */
@@ -3573,6 +3540,10 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3573 /* Give the INITFF and Post time to settle. */ 3540 /* Give the INITFF and Post time to settle. */
3574 mdelay(100); 3541 mdelay(100);
3575 3542
3543 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
3544 if (hba_aer_enabled)
3545 pci_disable_pcie_error_reporting(phba->pcidev);
3546
3576 lpfc_hba_down_post(phba); 3547 lpfc_hba_down_post(phba);
3577 3548
3578 return 0; 3549 return 0;
@@ -4042,6 +4013,24 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4042 if (rc) 4013 if (rc)
4043 goto lpfc_sli_hba_setup_error; 4014 goto lpfc_sli_hba_setup_error;
4044 4015
4016 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4017 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4018 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4019 if (!rc) {
4020 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4021 "2709 This device supports "
4022 "Advanced Error Reporting (AER)\n");
4023 spin_lock_irq(&phba->hbalock);
4024 phba->hba_flag |= HBA_AER_ENABLED;
4025 spin_unlock_irq(&phba->hbalock);
4026 } else {
4027 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4028 "2708 This device does not support "
4029 "Advanced Error Reporting (AER)\n");
4030 phba->cfg_aer_support = 0;
4031 }
4032 }
4033
4045 if (phba->sli_rev == 3) { 4034 if (phba->sli_rev == 3) {
4046 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4035 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4047 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4036 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
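
The AER block above enables PCIe Advanced Error Reporting only when the cfg_aer_support knob is set, records success in hba_flag, and clears the knob when the device or platform cannot support it. A simplified sketch of that policy, with a stub in place of pci_enable_pcie_error_reporting():

#include <stdio.h>

struct hba_model { int cfg_aer_support; unsigned int flags; };
#define HBA_AER_ENABLED (1u << 0)

static int enable_aer_stub(void)        /* models the PCI core call */
{
        return 0;                       /* 0 = success, as in the real API */
}

static void maybe_enable_aer(struct hba_model *hba)
{
        if (hba->cfg_aer_support != 1 || (hba->flags & HBA_AER_ENABLED))
                return;
        if (enable_aer_stub() == 0) {
                hba->flags |= HBA_AER_ENABLED;
                puts("AER supported and enabled");
        } else {
                hba->cfg_aer_support = 0;  /* remember it is unsupported */
                puts("AER not supported by device/platform");
        }
}
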
@@ -4077,7 +4066,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4077 4066
4078lpfc_sli_hba_setup_error: 4067lpfc_sli_hba_setup_error:
4079 phba->link_state = LPFC_HBA_ERROR; 4068 phba->link_state = LPFC_HBA_ERROR;
4080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4081 "0445 Firmware initialization failed\n"); 4070 "0445 Firmware initialization failed\n");
4082 return rc; 4071 return rc;
4083} 4072}
@@ -4163,7 +4152,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4163 * addition, this routine gets the port vpd data. 4152 * addition, this routine gets the port vpd data.
4164 * 4153 *
4165 * Return codes 4154 * Return codes
4166 * 0 - sucessful 4155 * 0 - successful
 4167 * ENOMEM - could not allocate memory. 4156 * ENOMEM - could not allocate memory.
4168 **/ 4157 **/
4169static int 4158static int
@@ -4211,6 +4200,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4211 if (rc) { 4200 if (rc) {
4212 dma_free_coherent(&phba->pcidev->dev, dma_size, 4201 dma_free_coherent(&phba->pcidev->dev, dma_size,
4213 dmabuf->virt, dmabuf->phys); 4202 dmabuf->virt, dmabuf->phys);
4203 kfree(dmabuf);
4214 return -EIO; 4204 return -EIO;
4215 } 4205 }
4216 4206
@@ -4243,7 +4233,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4243 4233
4244 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4234 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4245 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4235 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4246 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4247 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4236 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4248 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4237 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4249 LPFC_QUEUE_REARM); 4238 LPFC_QUEUE_REARM);
@@ -4322,6 +4311,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4322 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 4311 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4323 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 4312 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4324 phba->hba_flag |= HBA_FCOE_SUPPORT; 4313 phba->hba_flag |= HBA_FCOE_SUPPORT;
4314
4315 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
4316 LPFC_DCBX_CEE_MODE)
4317 phba->hba_flag |= HBA_FIP_SUPPORT;
4318 else
4319 phba->hba_flag &= ~HBA_FIP_SUPPORT;
4320
4325 if (phba->sli_rev != LPFC_SLI_REV4 || 4321 if (phba->sli_rev != LPFC_SLI_REV4 ||
4326 !(phba->hba_flag & HBA_FCOE_SUPPORT)) { 4322 !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
4327 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4323 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -4423,7 +4419,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4423 spin_unlock_irq(&phba->hbalock); 4419 spin_unlock_irq(&phba->hbalock);
4424 4420
4425 /* Read the port's service parameters. */ 4421 /* Read the port's service parameters. */
4426 lpfc_read_sparam(phba, mboxq, vport->vpi); 4422 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
4423 if (rc) {
4424 phba->link_state = LPFC_HBA_ERROR;
4425 rc = -ENOMEM;
4426 goto out_free_vpd;
4427 }
4428
4427 mboxq->vport = vport; 4429 mboxq->vport = vport;
4428 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4430 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4429 mp = (struct lpfc_dmabuf *) mboxq->context1; 4431 mp = (struct lpfc_dmabuf *) mboxq->context1;
@@ -4468,7 +4470,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4468 rc = lpfc_sli4_post_sgl_list(phba); 4470 rc = lpfc_sli4_post_sgl_list(phba);
4469 if (unlikely(rc)) { 4471 if (unlikely(rc)) {
4470 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4472 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4471 "0582 Error %d during sgl post operation", rc); 4473 "0582 Error %d during sgl post operation\n",
4474 rc);
4472 rc = -ENODEV; 4475 rc = -ENODEV;
4473 goto out_free_vpd; 4476 goto out_free_vpd;
4474 } 4477 }
@@ -4477,8 +4480,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4477 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 4480 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4478 if (unlikely(rc)) { 4481 if (unlikely(rc)) {
4479 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4482 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4480 "0383 Error %d during scsi sgl post opeation", 4483 "0383 Error %d during scsi sgl post "
4481 rc); 4484 "operation\n", rc);
4482 /* Some Scsi buffers were moved to the abort scsi list */ 4485 /* Some Scsi buffers were moved to the abort scsi list */
4483 /* A pci function reset will repost them */ 4486 /* A pci function reset will repost them */
4484 rc = -ENODEV; 4487 rc = -ENODEV;
@@ -4494,10 +4497,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4494 rc = -ENODEV; 4497 rc = -ENODEV;
4495 goto out_free_vpd; 4498 goto out_free_vpd;
4496 } 4499 }
4497 if (phba->cfg_enable_fip)
4498 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
4499 else
4500 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4501 4500
4502 /* Set up all the queues to the device */ 4501 /* Set up all the queues to the device */
4503 rc = lpfc_sli4_queue_setup(phba); 4502 rc = lpfc_sli4_queue_setup(phba);
@@ -4521,6 +4520,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4521 /* Post receive buffers to the device */ 4520 /* Post receive buffers to the device */
4522 lpfc_sli4_rb_setup(phba); 4521 lpfc_sli4_rb_setup(phba);
4523 4522
4523 /* Reset HBA FCF states after HBA reset */
4524 phba->fcf.fcf_flag = 0;
4525 phba->fcf.current_rec.flag = 0;
4526
4524 /* Start the ELS watchdog timer */ 4527 /* Start the ELS watchdog timer */
4525 mod_timer(&vport->els_tmofunc, 4528 mod_timer(&vport->els_tmofunc,
4526 jiffies + HZ * (phba->fc_ratov * 2)); 4529 jiffies + HZ * (phba->fc_ratov * 2));
@@ -5669,7 +5672,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
5669 case CMD_GEN_REQUEST64_CX: 5672 case CMD_GEN_REQUEST64_CX:
5670 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 5673 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
5671 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 5674 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
5672 FC_FCP_CMND) || 5675 FC_RCTL_DD_UNSOL_CMD) ||
5673 (piocb->iocb.un.genreq64.w5.hcsw.Type != 5676 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
5674 MENLO_TRANSPORT_TYPE)) 5677 MENLO_TRANSPORT_TYPE))
5675 5678
@@ -5777,19 +5780,19 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5777 5780
5778 for (i = 0; i < numBdes; i++) { 5781 for (i = 0; i < numBdes; i++) {
5779 /* Should already be byte swapped. */ 5782 /* Should already be byte swapped. */
5780 sgl->addr_hi = bpl->addrHigh; 5783 sgl->addr_hi = bpl->addrHigh;
5781 sgl->addr_lo = bpl->addrLow; 5784 sgl->addr_lo = bpl->addrLow;
5782 /* swap the size field back to the cpu so we 5785
5783 * can assign it to the sgl.
5784 */
5785 bde.tus.w = le32_to_cpu(bpl->tus.w);
5786 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5787 if ((i+1) == numBdes) 5786 if ((i+1) == numBdes)
5788 bf_set(lpfc_sli4_sge_last, sgl, 1); 5787 bf_set(lpfc_sli4_sge_last, sgl, 1);
5789 else 5788 else
5790 bf_set(lpfc_sli4_sge_last, sgl, 0); 5789 bf_set(lpfc_sli4_sge_last, sgl, 0);
5791 sgl->word2 = cpu_to_le32(sgl->word2); 5790 sgl->word2 = cpu_to_le32(sgl->word2);
5792 sgl->word3 = cpu_to_le32(sgl->word3); 5791 /* swap the size field back to the cpu so we
5792 * can assign it to the sgl.
5793 */
5794 bde.tus.w = le32_to_cpu(bpl->tus.w);
5795 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
5793 bpl++; 5796 bpl++;
5794 sgl++; 5797 sgl++;
5795 } 5798 }
@@ -5802,11 +5805,10 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5802 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 5805 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5803 sgl->addr_lo = 5806 sgl->addr_lo =
5804 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 5807 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5805 bf_set(lpfc_sli4_sge_len, sgl,
5806 icmd->un.genreq64.bdl.bdeSize);
5807 bf_set(lpfc_sli4_sge_last, sgl, 1); 5808 bf_set(lpfc_sli4_sge_last, sgl, 1);
5808 sgl->word2 = cpu_to_le32(sgl->word2); 5809 sgl->word2 = cpu_to_le32(sgl->word2);
5809 sgl->word3 = cpu_to_le32(sgl->word3); 5810 sgl->sge_len =
5811 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
5810 } 5812 }
5811 return sglq->sli4_xritag; 5813 return sglq->sli4_xritag;
5812} 5814}
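
The reordering in the two hunks above fixes a byte-order bug: word2 (which carries the last-SGE flag) is converted to little-endian exactly once, and the length is then stored through the separate sge_len field, instead of being bit-set into data that had already been swapped. A minimal model of the corrected ordering (stub swap helper, illustrative struct):

#include <stdint.h>

static inline uint32_t cpu_to_le32_model(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);    /* swap only on big-endian hosts */
#else
        return v;
#endif
}

struct sge_model {
        uint32_t word2;         /* flag bits such as "last SGE" */
        uint32_t sge_len;       /* plain little-endian length */
};

static void fill_sge(struct sge_model *sgl, uint32_t flags_cpu,
                     uint32_t len_cpu)
{
        sgl->word2 = cpu_to_le32_model(flags_cpu);      /* swap once */
        sgl->sge_len = cpu_to_le32_model(len_cpu);      /* separate field */
}
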
@@ -5849,7 +5851,7 @@ static int
5849lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 5851lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5850 union lpfc_wqe *wqe) 5852 union lpfc_wqe *wqe)
5851{ 5853{
5852 uint32_t payload_len = 0; 5854 uint32_t xmit_len = 0, total_len = 0;
5853 uint8_t ct = 0; 5855 uint8_t ct = 0;
5854 uint32_t fip; 5856 uint32_t fip;
5855 uint32_t abort_tag; 5857 uint32_t abort_tag;
@@ -5857,12 +5859,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5857 uint8_t cmnd; 5859 uint8_t cmnd;
5858 uint16_t xritag; 5860 uint16_t xritag;
5859 struct ulp_bde64 *bpl = NULL; 5861 struct ulp_bde64 *bpl = NULL;
5862 uint32_t els_id = ELS_ID_DEFAULT;
5863 int numBdes, i;
5864 struct ulp_bde64 bde;
5860 5865
5861 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); 5866 fip = phba->hba_flag & HBA_FIP_SUPPORT;
5862 /* The fcp commands will set command type */ 5867 /* The fcp commands will set command type */
5863 if (iocbq->iocb_flag & LPFC_IO_FCP) 5868 if (iocbq->iocb_flag & LPFC_IO_FCP)
5864 command_type = FCP_COMMAND; 5869 command_type = FCP_COMMAND;
5865 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS)) 5870 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
5866 command_type = ELS_COMMAND_FIP; 5871 command_type = ELS_COMMAND_FIP;
5867 else 5872 else
5868 command_type = ELS_COMMAND_NON_FIP; 5873 command_type = ELS_COMMAND_NON_FIP;
@@ -5874,6 +5879,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5874 wqe->words[7] = 0; /* The ct field has moved so reset */ 5879 wqe->words[7] = 0; /* The ct field has moved so reset */
5875 /* words0-2 bpl convert bde */ 5880 /* words0-2 bpl convert bde */
5876 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 5881 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5882 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
5883 sizeof(struct ulp_bde64);
5877 bpl = (struct ulp_bde64 *) 5884 bpl = (struct ulp_bde64 *)
5878 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 5885 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5879 if (!bpl) 5886 if (!bpl)
@@ -5886,9 +5893,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5886 * can assign it to the sgl. 5893 * can assign it to the sgl.
5887 */ 5894 */
5888 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 5895 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5889 payload_len = wqe->generic.bde.tus.f.bdeSize; 5896 xmit_len = wqe->generic.bde.tus.f.bdeSize;
5897 total_len = 0;
5898 for (i = 0; i < numBdes; i++) {
5899 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
5900 total_len += bde.tus.f.bdeSize;
5901 }
5890 } else 5902 } else
5891 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 5903 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5892 5904
5893 iocbq->iocb.ulpIoTag = iocbq->iotag; 5905 iocbq->iocb.ulpIoTag = iocbq->iotag;
5894 cmnd = iocbq->iocb.ulpCommand; 5906 cmnd = iocbq->iocb.ulpCommand;
@@ -5902,7 +5914,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5902 iocbq->iocb.ulpCommand); 5914 iocbq->iocb.ulpCommand);
5903 return IOCB_ERROR; 5915 return IOCB_ERROR;
5904 } 5916 }
5905 wqe->els_req.payload_len = payload_len; 5917 wqe->els_req.payload_len = xmit_len;
 5906 /* Els_request64 has a TMO */ 5918 /* Els_request64 has a TMO */
5907 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 5919 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5908 iocbq->iocb.ulpTimeout); 5920 iocbq->iocb.ulpTimeout);
@@ -5923,7 +5935,22 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5923 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); 5935 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5924 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 5936 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5925 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 5937 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5938
5939 if (command_type == ELS_COMMAND_FIP) {
5940 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
5941 >> LPFC_FIP_ELS_ID_SHIFT);
5942 }
5943 bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
5944
5926 break; 5945 break;
5946 case CMD_XMIT_SEQUENCE64_CX:
5947 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5948 iocbq->iocb.un.ulpWord[3]);
5949 wqe->generic.word3 = 0;
5950 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5951 /* The entire sequence is transmitted for this IOCB */
5952 xmit_len = total_len;
5953 cmnd = CMD_XMIT_SEQUENCE64_CR;
5927 case CMD_XMIT_SEQUENCE64_CR: 5954 case CMD_XMIT_SEQUENCE64_CR:
5928 /* word3 iocb=io_tag32 wqe=payload_offset */ 5955 /* word3 iocb=io_tag32 wqe=payload_offset */
 5929 /* payload offset used for multiple outstanding 5956 /* payload offset used for multiple outstanding
@@ -5933,7 +5960,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5933 /* word4 relative_offset memcpy */ 5960 /* word4 relative_offset memcpy */
5934 /* word5 r_ctl/df_ctl memcpy */ 5961 /* word5 r_ctl/df_ctl memcpy */
5935 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 5962 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5936 wqe->xmit_sequence.xmit_len = payload_len; 5963 wqe->xmit_sequence.xmit_len = xmit_len;
5964 command_type = OTHER_COMMAND;
5937 break; 5965 break;
5938 case CMD_XMIT_BCAST64_CN: 5966 case CMD_XMIT_BCAST64_CN:
5939 /* word3 iocb=iotag32 wqe=payload_len */ 5967 /* word3 iocb=iotag32 wqe=payload_len */
@@ -5962,7 +5990,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5962 case CMD_FCP_IREAD64_CR: 5990 case CMD_FCP_IREAD64_CR:
5963 /* FCP_CMD is always the 1st sgl entry */ 5991 /* FCP_CMD is always the 1st sgl entry */
5964 wqe->fcp_iread.payload_len = 5992 wqe->fcp_iread.payload_len =
5965 payload_len + sizeof(struct fcp_rsp); 5993 xmit_len + sizeof(struct fcp_rsp);
5966 5994
5967 /* word 4 (xfer length) should have been set on the memcpy */ 5995 /* word 4 (xfer length) should have been set on the memcpy */
5968 5996
@@ -5999,7 +6027,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5999 * sgl[1] = rsp. 6027 * sgl[1] = rsp.
6000 * 6028 *
6001 */ 6029 */
6002 wqe->gen_req.command_len = payload_len; 6030 wqe->gen_req.command_len = xmit_len;
6003 /* Word4 parameter copied in the memcpy */ 6031 /* Word4 parameter copied in the memcpy */
6004 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ 6032 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
6005 /* word6 context tag copied in memcpy */ 6033 /* word6 context tag copied in memcpy */
@@ -6051,12 +6079,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6051 else 6079 else
6052 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 6080 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
6053 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 6081 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
6054 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6055 wqe->words[5] = 0; 6082 wqe->words[5] = 0;
6056 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6083 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
6057 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6084 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6058 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 6085 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6059 wqe->generic.abort_tag = abort_tag;
6060 /* 6086 /*
6061 * The abort handler will send us CMD_ABORT_XRI_CN or 6087 * The abort handler will send us CMD_ABORT_XRI_CN or
6062 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 6088 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
@@ -6066,6 +6092,38 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6066 command_type = OTHER_COMMAND; 6092 command_type = OTHER_COMMAND;
6067 xritag = 0; 6093 xritag = 0;
6068 break; 6094 break;
6095 case CMD_XMIT_BLS_RSP64_CX:
6096 /* As BLS ABTS-ACC WQE is very different from other WQEs,
6097 * we re-construct this WQE here based on information in
6098 * iocbq from scratch.
6099 */
6100 memset(wqe, 0, sizeof(union lpfc_wqe));
6101 /* OX_ID is invariable to who sent ABTS to CT exchange */
6102 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
6103 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
6104 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
6105 LPFC_ABTS_UNSOL_INT) {
6106 /* ABTS sent by initiator to CT exchange, the
6107 * RX_ID field will be filled with the newly
6108 * allocated responder XRI.
6109 */
6110 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6111 iocbq->sli4_xritag);
6112 } else {
6113 /* ABTS sent by responder to CT exchange, the
6114 * RX_ID field will be filled with the responder
6115 * RX_ID from ABTS.
6116 */
6117 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6118 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
6119 }
6120 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
6121 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6122 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6123 iocbq->iocb.ulpContext);
 6124 /* Overwrite the pre-set command type with OTHER_COMMAND */
6125 command_type = OTHER_COMMAND;
6126 break;
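
The comments in this case encode the BLS ACC addressing rule: the OX_ID always echoes the ABTS, while the RX_ID depends on which side of the exchange sent it. A small sketch of that selection with illustrative names:

#include <stdint.h>

enum abts_origin { ABTS_FROM_INITIATOR, ABTS_FROM_RESPONDER };

struct bls_acc_ids { uint16_t oxid, rxid; };

static struct bls_acc_ids build_bls_acc(enum abts_origin origin,
                                        uint16_t abts_oxid,
                                        uint16_t abts_rxid,
                                        uint16_t new_responder_xri)
{
        struct bls_acc_ids ids;

        ids.oxid = abts_oxid;   /* invariant to who sent the ABTS */
        if (origin == ABTS_FROM_INITIATOR)
                ids.rxid = new_responder_xri;   /* newly allocated XRI */
        else
                ids.rxid = abts_rxid;           /* echo responder's RX_ID */
        return ids;
}
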
6069 case CMD_XRI_ABORTED_CX: 6127 case CMD_XRI_ABORTED_CX:
6070 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 6128 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
6071 /* words0-2 are all 0's no bde */ 6129 /* words0-2 are all 0's no bde */
@@ -6120,11 +6178,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6120 uint16_t xritag; 6178 uint16_t xritag;
6121 union lpfc_wqe wqe; 6179 union lpfc_wqe wqe;
6122 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 6180 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6123 uint32_t fcp_wqidx;
6124 6181
6125 if (piocb->sli4_xritag == NO_XRI) { 6182 if (piocb->sli4_xritag == NO_XRI) {
6126 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 6183 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6127 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6184 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6128 sglq = NULL; 6185 sglq = NULL;
6129 else { 6186 else {
6130 sglq = __lpfc_sli_get_sglq(phba); 6187 sglq = __lpfc_sli_get_sglq(phba);
@@ -6154,9 +6211,18 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6154 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 6211 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6155 return IOCB_ERROR; 6212 return IOCB_ERROR;
6156 6213
6157 if (piocb->iocb_flag & LPFC_IO_FCP) { 6214 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
6158 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 6215 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
6159 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe)) 6216 /*
6217 * For FCP command IOCB, get a new WQ index to distribute
 6218 * WQE across the WQs. On the other hand, an abort IOCB
 6219 * carries the same WQ index as the original command
6220 * IOCB.
6221 */
6222 if (piocb->iocb_flag & LPFC_IO_FCP)
6223 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
6224 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
6225 &wqe))
6160 return IOCB_ERROR; 6226 return IOCB_ERROR;
6161 } else { 6227 } else {
6162 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 6228 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
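
The WQ selection above establishes an affinity rule: a fresh FCP command gets a round-robin work-queue index, while an abort (flagged LPFC_USE_FCPWQIDX) reuses the index stored in the iocb so the ABTS lands on the same WQ as the command it targets. A simplified sketch, with a stand-in for lpfc_sli4_scmd_to_wqidx_distr():

struct iocb_wq_model {
        int is_fcp_cmd;         /* new FCP command */
        int use_saved_wqidx;    /* abort: mirrors LPFC_USE_FCPWQIDX */
        int fcp_wqidx;
};

static int next_wqidx(int nwqs)         /* stand-in distributor */
{
        static unsigned int rr;
        return (int)(rr++ % (unsigned int)nwqs);
}

static int pick_wq(struct iocb_wq_model *io, int nwqs)
{
        if (io->is_fcp_cmd)
                io->fcp_wqidx = next_wqidx(nwqs);  /* fresh distribution */
        /* aborts arrive with use_saved_wqidx set and fcp_wqidx copied
         * from the original command, so nothing is recomputed */
        return io->fcp_wqidx;
}
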
@@ -6449,31 +6515,37 @@ lpfc_sli_setup(struct lpfc_hba *phba)
6449 pring->iotag_max = 4096; 6515 pring->iotag_max = 4096;
6450 pring->lpfc_sli_rcv_async_status = 6516 pring->lpfc_sli_rcv_async_status =
6451 lpfc_sli_async_event_handler; 6517 lpfc_sli_async_event_handler;
6452 pring->num_mask = 4; 6518 pring->num_mask = LPFC_MAX_RING_MASK;
6453 pring->prt[0].profile = 0; /* Mask 0 */ 6519 pring->prt[0].profile = 0; /* Mask 0 */
6454 pring->prt[0].rctl = FC_ELS_REQ; 6520 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
6455 pring->prt[0].type = FC_ELS_DATA; 6521 pring->prt[0].type = FC_TYPE_ELS;
6456 pring->prt[0].lpfc_sli_rcv_unsol_event = 6522 pring->prt[0].lpfc_sli_rcv_unsol_event =
6457 lpfc_els_unsol_event; 6523 lpfc_els_unsol_event;
6458 pring->prt[1].profile = 0; /* Mask 1 */ 6524 pring->prt[1].profile = 0; /* Mask 1 */
6459 pring->prt[1].rctl = FC_ELS_RSP; 6525 pring->prt[1].rctl = FC_RCTL_ELS_REP;
6460 pring->prt[1].type = FC_ELS_DATA; 6526 pring->prt[1].type = FC_TYPE_ELS;
6461 pring->prt[1].lpfc_sli_rcv_unsol_event = 6527 pring->prt[1].lpfc_sli_rcv_unsol_event =
6462 lpfc_els_unsol_event; 6528 lpfc_els_unsol_event;
6463 pring->prt[2].profile = 0; /* Mask 2 */ 6529 pring->prt[2].profile = 0; /* Mask 2 */
6464 /* NameServer Inquiry */ 6530 /* NameServer Inquiry */
6465 pring->prt[2].rctl = FC_UNSOL_CTL; 6531 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
6466 /* NameServer */ 6532 /* NameServer */
6467 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 6533 pring->prt[2].type = FC_TYPE_CT;
6468 pring->prt[2].lpfc_sli_rcv_unsol_event = 6534 pring->prt[2].lpfc_sli_rcv_unsol_event =
6469 lpfc_ct_unsol_event; 6535 lpfc_ct_unsol_event;
6470 pring->prt[3].profile = 0; /* Mask 3 */ 6536 pring->prt[3].profile = 0; /* Mask 3 */
6471 /* NameServer response */ 6537 /* NameServer response */
6472 pring->prt[3].rctl = FC_SOL_CTL; 6538 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
6473 /* NameServer */ 6539 /* NameServer */
6474 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 6540 pring->prt[3].type = FC_TYPE_CT;
6475 pring->prt[3].lpfc_sli_rcv_unsol_event = 6541 pring->prt[3].lpfc_sli_rcv_unsol_event =
6476 lpfc_ct_unsol_event; 6542 lpfc_ct_unsol_event;
6543 /* abort unsolicited sequence */
6544 pring->prt[4].profile = 0; /* Mask 4 */
6545 pring->prt[4].rctl = FC_RCTL_BA_ABTS;
6546 pring->prt[4].type = FC_TYPE_BLS;
6547 pring->prt[4].lpfc_sli_rcv_unsol_event =
6548 lpfc_sli4_ct_abort_unsol_event;
6477 break; 6549 break;
6478 } 6550 }
6479 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 6551 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
@@ -6976,8 +7048,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6976 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 7048 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
6977 7049
6978 spin_lock_irq(&phba->hbalock); 7050 spin_lock_irq(&phba->hbalock);
6979 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) 7051 if (phba->sli_rev < LPFC_SLI_REV4) {
6980 abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; 7052 if (abort_iotag != 0 &&
7053 abort_iotag <= phba->sli.last_iotag)
7054 abort_iocb =
7055 phba->sli.iocbq_lookup[abort_iotag];
7056 } else
7057 /* For sli4 the abort_tag is the XRI,
7058 * so the abort routine puts the iotag of the iocb
7059 * being aborted in the context field of the abort
7060 * IOCB.
7061 */
7062 abort_iocb = phba->sli.iocbq_lookup[abort_context];
6981 7063
6982 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, 7064 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
6983 "0327 Cannot abort els iocb %p " 7065 "0327 Cannot abort els iocb %p "
@@ -6991,9 +7073,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6991 * might have completed already. Do not free it again. 7073 * might have completed already. Do not free it again.
6992 */ 7074 */
6993 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 7075 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
6994 spin_unlock_irq(&phba->hbalock); 7076 if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
6995 lpfc_sli_release_iocbq(phba, cmdiocb); 7077 spin_unlock_irq(&phba->hbalock);
6996 return; 7078 lpfc_sli_release_iocbq(phba, cmdiocb);
7079 return;
7080 }
7081 /* For SLI4 the ulpContext field for abort IOCB
7082 * holds the iotag of the IOCB being aborted so
7083 * the local abort_context needs to be reset to
7084 * match the aborted IOCBs ulpContext.
7085 */
7086 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
7087 abort_context = abort_iocb->iocb.ulpContext;
6997 } 7088 }
6998 /* 7089 /*
6999 * make sure we have the right iocbq before taking it 7090 * make sure we have the right iocbq before taking it
@@ -7003,7 +7094,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7003 abort_iocb->iocb.ulpContext != abort_context || 7094 abort_iocb->iocb.ulpContext != abort_context ||
7004 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 7095 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
7005 spin_unlock_irq(&phba->hbalock); 7096 spin_unlock_irq(&phba->hbalock);
7006 else { 7097 else if (phba->sli_rev < LPFC_SLI_REV4) {
7098 /*
7099 * leave the SLI4 aborted command on the txcmplq
7100 * list and the command complete WCQE's XB bit
 7101 * will tell whether the SGL (XRI) can be released
 7102 * immediately or must be parked on the aborted SGL
 7103 * list until the abort XRI event arrives from the HBA.
7104 */
7007 list_del_init(&abort_iocb->list); 7105 list_del_init(&abort_iocb->list);
7008 pring->txcmplq_cnt--; 7106 pring->txcmplq_cnt--;
7009 spin_unlock_irq(&phba->hbalock); 7107 spin_unlock_irq(&phba->hbalock);
@@ -7012,11 +7110,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7012 * payload, so don't free data buffer till after 7110 * payload, so don't free data buffer till after
7013 * a hbeat. 7111 * a hbeat.
7014 */ 7112 */
7113 spin_lock_irq(&phba->hbalock);
7015 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 7114 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
7016
7017 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 7115 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
7116 spin_unlock_irq(&phba->hbalock);
7117
7018 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 7118 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
7019 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; 7119 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
7020 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 7120 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
7021 } 7121 }
7022 } 7122 }
@@ -7105,20 +7205,27 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7105 return 0; 7205 return 0;
7106 7206
7107 /* This signals the response to set the correct status 7207 /* This signals the response to set the correct status
7108 * before calling the completion handler. 7208 * before calling the completion handler
7109 */ 7209 */
7110 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 7210 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
7111 7211
7112 iabt = &abtsiocbp->iocb; 7212 iabt = &abtsiocbp->iocb;
7113 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 7213 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
7114 iabt->un.acxri.abortContextTag = icmd->ulpContext; 7214 iabt->un.acxri.abortContextTag = icmd->ulpContext;
7115 if (phba->sli_rev == LPFC_SLI_REV4) 7215 if (phba->sli_rev == LPFC_SLI_REV4) {
7116 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 7216 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
7217 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
7218 }
7117 else 7219 else
7118 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 7220 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
7119 iabt->ulpLe = 1; 7221 iabt->ulpLe = 1;
7120 iabt->ulpClass = icmd->ulpClass; 7222 iabt->ulpClass = icmd->ulpClass;
7121 7223
7224 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7225 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
7226 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
7227 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
7228
7122 if (phba->link_state >= LPFC_LINK_UP) 7229 if (phba->link_state >= LPFC_LINK_UP)
7123 iabt->ulpCommand = CMD_ABORT_XRI_CN; 7230 iabt->ulpCommand = CMD_ABORT_XRI_CN;
7124 else 7231 else
@@ -7322,6 +7429,11 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
7322 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7429 abtsiocb->iocb.ulpClass = cmd->ulpClass;
7323 abtsiocb->vport = phba->pport; 7430 abtsiocb->vport = phba->pport;
7324 7431
7432 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7433 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
7434 if (iocbq->iocb_flag & LPFC_IO_FCP)
7435 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
7436
7325 if (lpfc_is_link_up(phba)) 7437 if (lpfc_is_link_up(phba))
7326 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 7438 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
7327 else 7439 else
@@ -7365,6 +7477,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7365{ 7477{
7366 wait_queue_head_t *pdone_q; 7478 wait_queue_head_t *pdone_q;
7367 unsigned long iflags; 7479 unsigned long iflags;
7480 struct lpfc_scsi_buf *lpfc_cmd;
7368 7481
7369 spin_lock_irqsave(&phba->hbalock, iflags); 7482 spin_lock_irqsave(&phba->hbalock, iflags);
7370 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 7483 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7372,6 +7485,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7372 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 7485 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
7373 &rspiocbq->iocb, sizeof(IOCB_t)); 7486 &rspiocbq->iocb, sizeof(IOCB_t));
7374 7487
7488 /* Set the exchange busy flag for task management commands */
7489 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
7490 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
7491 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
7492 cur_iocbq);
7493 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
7494 }
7495
7375 pdone_q = cmdiocbq->context_un.wait_queue; 7496 pdone_q = cmdiocbq->context_un.wait_queue;
7376 if (pdone_q) 7497 if (pdone_q)
7377 wake_up(pdone_q); 7498 wake_up(pdone_q);
@@ -7687,31 +7808,28 @@ static int
7687lpfc_sli4_eratt_read(struct lpfc_hba *phba) 7808lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7688{ 7809{
7689 uint32_t uerr_sta_hi, uerr_sta_lo; 7810 uint32_t uerr_sta_hi, uerr_sta_lo;
7690 uint32_t onlnreg0, onlnreg1;
7691 7811
7692 /* For now, use the SLI4 device internal unrecoverable error 7812 /* For now, use the SLI4 device internal unrecoverable error
7693 * registers for error attention. This can be changed later. 7813 * registers for error attention. This can be changed later.
7694 */ 7814 */
7695 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); 7815 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7696 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); 7816 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7697 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { 7817 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
7698 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); 7818 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
7699 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); 7819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7700 if (uerr_sta_lo || uerr_sta_hi) { 7820 "1423 HBA Unrecoverable error: "
7701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7821 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7702 "1423 HBA Unrecoverable error: " 7822 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
7703 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 7823 uerr_sta_lo, uerr_sta_hi,
7704 "online0_reg=0x%x, online1_reg=0x%x\n", 7824 phba->sli4_hba.ue_mask_lo,
7705 uerr_sta_lo, uerr_sta_hi, 7825 phba->sli4_hba.ue_mask_hi);
7706 onlnreg0, onlnreg1); 7826 phba->work_status[0] = uerr_sta_lo;
7707 phba->work_status[0] = uerr_sta_lo; 7827 phba->work_status[1] = uerr_sta_hi;
7708 phba->work_status[1] = uerr_sta_hi; 7828 /* Set the driver HA work bitmap */
7709 /* Set the driver HA work bitmap */ 7829 phba->work_ha |= HA_ERATT;
7710 phba->work_ha |= HA_ERATT; 7830 /* Indicate polling handles this ERATT */
7711 /* Indicate polling handles this ERATT */ 7831 phba->hba_flag |= HBA_ERATT_HANDLED;
7712 phba->hba_flag |= HBA_ERATT_HANDLED; 7832 return 1;
7713 return 1;
7714 }
7715 } 7833 }
7716 return 0; 7834 return 0;
7717} 7835}
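
The rewritten check treats the UE mask registers as ignore masks: a status bit only signals a real unrecoverable error when the corresponding mask bit is clear, so the live-error set is status & ~mask. The test reduces to:

#include <stdint.h>

static int hba_has_unrecoverable_error(uint32_t sta_lo, uint32_t sta_hi,
                                       uint32_t mask_lo, uint32_t mask_hi)
{
        /* any status bit not covered by its mask is a live error */
        return (~mask_lo & sta_lo) || (~mask_hi & sta_hi);
}
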
@@ -7834,7 +7952,7 @@ irqreturn_t
7834lpfc_sli_sp_intr_handler(int irq, void *dev_id) 7952lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7835{ 7953{
7836 struct lpfc_hba *phba; 7954 struct lpfc_hba *phba;
7837 uint32_t ha_copy; 7955 uint32_t ha_copy, hc_copy;
7838 uint32_t work_ha_copy; 7956 uint32_t work_ha_copy;
7839 unsigned long status; 7957 unsigned long status;
7840 unsigned long iflag; 7958 unsigned long iflag;
@@ -7892,8 +8010,13 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7892 } 8010 }
7893 8011
7894 /* Clear up only attention source related to slow-path */ 8012 /* Clear up only attention source related to slow-path */
8013 hc_copy = readl(phba->HCregaddr);
8014 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
8015 HC_LAINT_ENA | HC_ERINT_ENA),
8016 phba->HCregaddr);
7895 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 8017 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
7896 phba->HAregaddr); 8018 phba->HAregaddr);
8019 writel(hc_copy, phba->HCregaddr);
7897 readl(phba->HAregaddr); /* flush */ 8020 readl(phba->HAregaddr); /* flush */
7898 spin_unlock_irqrestore(&phba->hbalock, iflag); 8021 spin_unlock_irqrestore(&phba->hbalock, iflag);
7899 } else 8022 } else
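The new register sequence follows a save, mask, clear, restore pattern so the interrupt enable bits in the Host Control register are not lost while Host Attention bits are cleared. Condensed into a hypothetical helper with the same readl/writel semantics:

        static void lpfc_clear_ha_masked(void __iomem *hc_reg, void __iomem *ha_reg,
                                         uint32_t ena_bits, uint32_t ha_bits)
        {
                uint32_t hc_copy = readl(hc_reg);       /* save current enables */

                writel(hc_copy & ~ena_bits, hc_reg);    /* mask interrupt enables */
                writel(ha_bits, ha_reg);                /* clear the attention bits */
                writel(hc_copy, hc_reg);                /* restore saved enables */
                readl(ha_reg);                          /* flush posted PCI writes */
        }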
@@ -8049,7 +8172,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8049 KERN_ERR, 8172 KERN_ERR,
8050 LOG_MBOX | LOG_SLI, 8173 LOG_MBOX | LOG_SLI,
8051 "0350 rc should have" 8174 "0350 rc should have"
8052 "been MBX_BUSY"); 8175 "been MBX_BUSY\n");
8053 if (rc != MBX_NOT_FINISHED) 8176 if (rc != MBX_NOT_FINISHED)
8054 goto send_current_mbox; 8177 goto send_current_mbox;
8055 } 8178 }
@@ -8078,7 +8201,7 @@ send_current_mbox:
8078 if (rc != MBX_SUCCESS) 8201 if (rc != MBX_SUCCESS)
8079 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8202 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8080 LOG_SLI, "0349 rc should be " 8203 LOG_SLI, "0349 rc should be "
8081 "MBX_SUCCESS"); 8204 "MBX_SUCCESS\n");
8082 } 8205 }
8083 8206
8084 spin_lock_irqsave(&phba->hbalock, iflag); 8207 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -8203,6 +8326,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
8203 struct lpfc_hba *phba; 8326 struct lpfc_hba *phba;
8204 irqreturn_t sp_irq_rc, fp_irq_rc; 8327 irqreturn_t sp_irq_rc, fp_irq_rc;
8205 unsigned long status1, status2; 8328 unsigned long status1, status2;
8329 uint32_t hc_copy;
8206 8330
8207 /* 8331 /*
8208 * Get the driver's phba structure from the dev_id and 8332 * Get the driver's phba structure from the dev_id and
@@ -8240,7 +8364,12 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
8240 } 8364 }
8241 8365
8242 /* Clear attention sources except link and error attentions */ 8366 /* Clear attention sources except link and error attentions */
8367 hc_copy = readl(phba->HCregaddr);
8368 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
8369 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
8370 phba->HCregaddr);
8243 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 8371 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
8372 writel(hc_copy, phba->HCregaddr);
8244 readl(phba->HAregaddr); /* flush */ 8373 readl(phba->HAregaddr); /* flush */
8245 spin_unlock(&phba->hbalock); 8374 spin_unlock(&phba->hbalock);
8246 8375
@@ -8342,17 +8471,28 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8342 } 8471 }
8343} 8472}
8344 8473
8474/**
8475 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 8476 * @phba: pointer to lpfc hba data structure.
 8477 * @pIocbIn: pointer to the rspiocbq.
 8478 * @pIocbOut: pointer to the cmdiocbq.
 8479 * @wcqe: pointer to the completed WCQE.
 8480 *
 8481 * This routine transfers the fields of a command iocbq to a response iocbq
 8482 * by copying all the IOCB fields from the command iocbq and transferring the
 8483 * completion status information from the completed WCQE.
8484 **/
8345static void 8485static void
8346lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, 8486lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
8487 struct lpfc_iocbq *pIocbIn,
8347 struct lpfc_iocbq *pIocbOut, 8488 struct lpfc_iocbq *pIocbOut,
8348 struct lpfc_wcqe_complete *wcqe) 8489 struct lpfc_wcqe_complete *wcqe)
8349{ 8490{
8491 unsigned long iflags;
8350 size_t offset = offsetof(struct lpfc_iocbq, iocb); 8492 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8351 8493
8352 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 8494 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8353 sizeof(struct lpfc_iocbq) - offset); 8495 sizeof(struct lpfc_iocbq) - offset);
8354 memset(&pIocbIn->sli4_info, 0,
8355 sizeof(struct lpfc_sli4_rspiocb_info));
8356 /* Map WCQE parameters into irspiocb parameters */ 8496 /* Map WCQE parameters into irspiocb parameters */
8357 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); 8497 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8358 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 8498 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
@@ -8362,18 +8502,60 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8362 wcqe->total_data_placed; 8502 wcqe->total_data_placed;
8363 else 8503 else
8364 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8504 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8365 else 8505 else {
8366 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8506 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8367 /* Load in additional WCQE parameters */ 8507 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
8368 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe); 8508 }
8369 pIocbIn->sli4_info.bfield = 0; 8509
8370 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 8510 /* Pick up HBA exchange busy condition */
8371 pIocbIn->sli4_info.bfield |= LPFC_XB; 8511 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
8372 if (bf_get(lpfc_wcqe_c_pv, wcqe)) { 8512 spin_lock_irqsave(&phba->hbalock, iflags);
8373 pIocbIn->sli4_info.bfield |= LPFC_PV; 8513 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
8374 pIocbIn->sli4_info.priority = 8514 spin_unlock_irqrestore(&phba->hbalock, iflags);
8375 bf_get(lpfc_wcqe_c_priority, wcqe); 8515 }
8516}
8517
8518/**
8519 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
8520 * @phba: Pointer to HBA context object.
8521 * @wcqe: Pointer to work-queue completion queue entry.
8522 *
 8523 * This routine handles an ELS work-queue completion event and constructs
 8524 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 8525 * discovery engine to handle.
 8526 *
 8527 * Return: Pointer to the response IOCBQ, NULL otherwise.
8528 **/
8529static struct lpfc_iocbq *
8530lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
8531 struct lpfc_iocbq *irspiocbq)
8532{
8533 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8534 struct lpfc_iocbq *cmdiocbq;
8535 struct lpfc_wcqe_complete *wcqe;
8536 unsigned long iflags;
8537
8538 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
8539 spin_lock_irqsave(&phba->hbalock, iflags);
8540 pring->stats.iocb_event++;
8541 /* Look up the ELS command IOCB and create pseudo response IOCB */
8542 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8543 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8544 spin_unlock_irqrestore(&phba->hbalock, iflags);
8545
8546 if (unlikely(!cmdiocbq)) {
8547 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8548 "0386 ELS complete with no corresponding "
8549 "cmdiocb: iotag (%d)\n",
8550 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8551 lpfc_sli_release_iocbq(phba, irspiocbq);
8552 return NULL;
8376 } 8553 }
8554
8555 /* Fake the irspiocbq and copy necessary response information */
8556 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
8557
8558 return irspiocbq;
8377} 8559}
8378 8560
8379/** 8561/**
@@ -8566,45 +8748,26 @@ static bool
8566lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 8748lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8567 struct lpfc_wcqe_complete *wcqe) 8749 struct lpfc_wcqe_complete *wcqe)
8568{ 8750{
8569 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8570 struct lpfc_iocbq *cmdiocbq;
8571 struct lpfc_iocbq *irspiocbq; 8751 struct lpfc_iocbq *irspiocbq;
8572 unsigned long iflags; 8752 unsigned long iflags;
8573 bool workposted = false;
8574 8753
8575 spin_lock_irqsave(&phba->hbalock, iflags); 8754 /* Get an irspiocbq for later ELS response processing use */
8576 pring->stats.iocb_event++;
8577 /* Look up the ELS command IOCB and create pseudo response IOCB */
8578 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8579 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8580 spin_unlock_irqrestore(&phba->hbalock, iflags);
8581
8582 if (unlikely(!cmdiocbq)) {
8583 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8584 "0386 ELS complete with no corresponding "
8585 "cmdiocb: iotag (%d)\n",
8586 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8587 return workposted;
8588 }
8589
8590 /* Fake the irspiocbq and copy necessary response information */
8591 irspiocbq = lpfc_sli_get_iocbq(phba); 8755 irspiocbq = lpfc_sli_get_iocbq(phba);
8592 if (!irspiocbq) { 8756 if (!irspiocbq) {
8593 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8757 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8594 "0387 Failed to allocate an iocbq\n"); 8758 "0387 Failed to allocate an iocbq\n");
8595 return workposted; 8759 return false;
8596 } 8760 }
8597 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8598 8761
8599 /* Add the irspiocb to the response IOCB work list */ 8762 /* Save off the slow-path queue event for work thread to process */
8763 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
8600 spin_lock_irqsave(&phba->hbalock, iflags); 8764 spin_lock_irqsave(&phba->hbalock, iflags);
8601 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue); 8765 list_add_tail(&irspiocbq->cq_event.list,
8602 /* Indicate ELS ring attention */ 8766 &phba->sli4_hba.sp_queue_event);
8603 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING)); 8767 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8604 spin_unlock_irqrestore(&phba->hbalock, iflags); 8768 spin_unlock_irqrestore(&phba->hbalock, iflags);
8605 workposted = true;
8606 8769
8607 return workposted; 8770 return true;
8608} 8771}
8609 8772
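The WCQE is now only queued from interrupt context; a worker thread drains sp_queue_event later. A sketch of what that consumer side plausibly looks like, using the driver's list_remove_head() helper (the function name here is hypothetical):

        static void lpfc_sp_drain_queue_events(struct lpfc_hba *phba)
        {
                struct lpfc_cq_event *cq_event;
                unsigned long iflags;

                spin_lock_irqsave(&phba->hbalock, iflags);
                phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
                while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
                        list_remove_head(&phba->sli4_hba.sp_queue_event,
                                         cq_event, struct lpfc_cq_event, list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        /* dispatch on the saved CQE, e.g. the ELS response path */
                        spin_lock_irqsave(&phba->hbalock, iflags);
                }
                spin_unlock_irqrestore(&phba->hbalock, iflags);
        }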
8610/** 8773/**
@@ -8690,52 +8853,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8690} 8853}
8691 8854
8692/** 8855/**
8693 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8694 * @phba: Pointer to HBA context object.
8695 * @cq: Pointer to the completion queue.
8696 * @wcqe: Pointer to a completion queue entry.
8697 *
8698 * This routine process a slow-path work-queue completion queue entry.
8699 *
8700 * Return: true if work posted to worker thread, otherwise false.
8701 **/
8702static bool
8703lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8704 struct lpfc_cqe *cqe)
8705{
8706 struct lpfc_wcqe_complete wcqe;
8707 bool workposted = false;
8708
8709 /* Copy the work queue CQE and convert endian order if needed */
8710 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8711
8712 /* Check and process for different type of WCQE and dispatch */
8713 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8714 case CQE_CODE_COMPL_WQE:
8715 /* Process the WQ complete event */
8716 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8717 (struct lpfc_wcqe_complete *)&wcqe);
8718 break;
8719 case CQE_CODE_RELEASE_WQE:
8720 /* Process the WQ release event */
8721 lpfc_sli4_sp_handle_rel_wcqe(phba,
8722 (struct lpfc_wcqe_release *)&wcqe);
8723 break;
8724 case CQE_CODE_XRI_ABORTED:
8725 /* Process the WQ XRI abort event */
8726 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8727 (struct sli4_wcqe_xri_aborted *)&wcqe);
8728 break;
8729 default:
8730 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8731 "0388 Not a valid WCQE code: x%x\n",
8732 bf_get(lpfc_wcqe_c_code, &wcqe));
8733 break;
8734 }
8735 return workposted;
8736}
8737
8738/**
8739 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 8856 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8740 * @phba: Pointer to HBA context object. 8857 * @phba: Pointer to HBA context object.
8741 * @rcqe: Pointer to receive-queue completion queue entry. 8858 * @rcqe: Pointer to receive-queue completion queue entry.
@@ -8745,9 +8862,8 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8745 * Return: true if work posted to worker thread, otherwise false. 8862 * Return: true if work posted to worker thread, otherwise false.
8746 **/ 8863 **/
8747static bool 8864static bool
8748lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 8865lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
8749{ 8866{
8750 struct lpfc_rcqe rcqe;
8751 bool workposted = false; 8867 bool workposted = false;
8752 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 8868 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8753 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 8869 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
@@ -8755,31 +8871,28 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8755 uint32_t status; 8871 uint32_t status;
8756 unsigned long iflags; 8872 unsigned long iflags;
8757 8873
8758 /* Copy the receive queue CQE and convert endian order if needed */ 8874 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
8759 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8760 lpfc_sli4_rq_release(hrq, drq);
8761 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8762 goto out;
8763 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8764 goto out; 8875 goto out;
8765 8876
8766 status = bf_get(lpfc_rcqe_status, &rcqe); 8877 status = bf_get(lpfc_rcqe_status, rcqe);
8767 switch (status) { 8878 switch (status) {
8768 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 8879 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8769 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8880 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8770 "2537 Receive Frame Truncated!!\n"); 8881 "2537 Receive Frame Truncated!!\n");
8771 case FC_STATUS_RQ_SUCCESS: 8882 case FC_STATUS_RQ_SUCCESS:
8883 lpfc_sli4_rq_release(hrq, drq);
8772 spin_lock_irqsave(&phba->hbalock, iflags); 8884 spin_lock_irqsave(&phba->hbalock, iflags);
8773 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 8885 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8774 if (!dma_buf) { 8886 if (!dma_buf) {
8775 spin_unlock_irqrestore(&phba->hbalock, iflags); 8887 spin_unlock_irqrestore(&phba->hbalock, iflags);
8776 goto out; 8888 goto out;
8777 } 8889 }
8778 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe)); 8890 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 8779 		/* save off the frame for the work thread to process */ 8891 		/* save off the frame for the work thread to process */
8780 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list); 8892 list_add_tail(&dma_buf->cq_event.list,
8893 &phba->sli4_hba.sp_queue_event);
8781 /* Frame received */ 8894 /* Frame received */
8782 phba->hba_flag |= HBA_RECEIVE_BUFFER; 8895 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8783 spin_unlock_irqrestore(&phba->hbalock, iflags); 8896 spin_unlock_irqrestore(&phba->hbalock, iflags);
8784 workposted = true; 8897 workposted = true;
8785 break; 8898 break;
@@ -8794,7 +8907,58 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8794 } 8907 }
8795out: 8908out:
8796 return workposted; 8909 return workposted;
8910}
8911
8912/**
8913 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
8914 * @phba: Pointer to HBA context object.
8915 * @cq: Pointer to the completion queue.
 8916 * @cqe: Pointer to a completion queue entry.
 8917 *
 8918 * This routine processes a slow-path work-queue or receive-queue completion
 8919 * queue entry.
8920 *
8921 * Return: true if work posted to worker thread, otherwise false.
8922 **/
8923static bool
8924lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8925 struct lpfc_cqe *cqe)
8926{
8927 struct lpfc_cqe cqevt;
8928 bool workposted = false;
8929
8930 /* Copy the work queue CQE and convert endian order if needed */
8931 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
8797 8932
8933 /* Check and process for different type of WCQE and dispatch */
8934 switch (bf_get(lpfc_cqe_code, &cqevt)) {
8935 case CQE_CODE_COMPL_WQE:
8936 /* Process the WQ/RQ complete event */
8937 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8938 (struct lpfc_wcqe_complete *)&cqevt);
8939 break;
8940 case CQE_CODE_RELEASE_WQE:
8941 /* Process the WQ release event */
8942 lpfc_sli4_sp_handle_rel_wcqe(phba,
8943 (struct lpfc_wcqe_release *)&cqevt);
8944 break;
8945 case CQE_CODE_XRI_ABORTED:
8946 /* Process the WQ XRI abort event */
8947 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8948 (struct sli4_wcqe_xri_aborted *)&cqevt);
8949 break;
8950 case CQE_CODE_RECEIVE:
8951 /* Process the RQ event */
8952 workposted = lpfc_sli4_sp_handle_rcqe(phba,
8953 (struct lpfc_rcqe *)&cqevt);
8954 break;
8955 default:
8956 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8957 "0388 Not a valid WCQE code: x%x\n",
8958 bf_get(lpfc_cqe_code, &cqevt));
8959 break;
8960 }
8961 return workposted;
8798} 8962}
8799 8963
8800/** 8964/**
@@ -8819,8 +8983,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8819 int ecount = 0; 8983 int ecount = 0;
8820 uint16_t cqid; 8984 uint16_t cqid;
8821 8985
8822 if (bf_get(lpfc_eqe_major_code, eqe) != 0 || 8986 if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
8823 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8825 "0359 Not a valid slow-path completion " 8988 "0359 Not a valid slow-path completion "
8826 "event: majorcode=x%x, minorcode=x%x\n", 8989 "event: majorcode=x%x, minorcode=x%x\n",
@@ -8858,14 +9021,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8858 break; 9021 break;
8859 case LPFC_WCQ: 9022 case LPFC_WCQ:
8860 while ((cqe = lpfc_sli4_cq_get(cq))) { 9023 while ((cqe = lpfc_sli4_cq_get(cq))) {
8861 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe); 9024 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
8862 if (!(++ecount % LPFC_GET_QE_REL_INT))
8863 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8864 }
8865 break;
8866 case LPFC_RCQ:
8867 while ((cqe = lpfc_sli4_cq_get(cq))) {
8868 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8869 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9025 if (!(++ecount % LPFC_GET_QE_REL_INT))
8870 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9026 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8871 } 9027 }
@@ -8953,7 +9109,13 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8953 } 9109 }
8954 9110
8955 /* Fake the irspiocb and copy necessary response information */ 9111 /* Fake the irspiocb and copy necessary response information */
8956 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe); 9112 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
9113
9114 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
9115 spin_lock_irqsave(&phba->hbalock, iflags);
9116 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
9117 spin_unlock_irqrestore(&phba->hbalock, iflags);
9118 }
8957 9119
8958 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9120 /* Pass the cmd_iocb and the rsp state to the upper layer */
8959 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 9121 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
@@ -9059,8 +9221,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9059 uint16_t cqid; 9221 uint16_t cqid;
9060 int ecount = 0; 9222 int ecount = 0;
9061 9223
9062 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) || 9224 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
9063 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
9064 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9225 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9065 "0366 Not a valid fast-path completion " 9226 "0366 Not a valid fast-path completion "
9066 "event: majorcode=x%x, minorcode=x%x\n", 9227 "event: majorcode=x%x, minorcode=x%x\n",
@@ -10427,8 +10588,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10427 return xritag; 10588 return xritag;
10428 } 10589 }
10429 spin_unlock_irq(&phba->hbalock); 10590 spin_unlock_irq(&phba->hbalock);
10430 10591 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10431 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10432 "2004 Failed to allocate XRI.last XRITAG is %d" 10592 "2004 Failed to allocate XRI.last XRITAG is %d"
10433 " Max XRI is %d, Used XRI is %d\n", 10593 " Max XRI is %d, Used XRI is %d\n",
10434 phba->sli4_hba.next_xri, 10594 phba->sli4_hba.next_xri,
@@ -10492,15 +10652,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10492 lpfc_sli4_mbox_cmd_free(phba, mbox); 10652 lpfc_sli4_mbox_cmd_free(phba, mbox);
10493 return -ENOMEM; 10653 return -ENOMEM;
10494 } 10654 }
10495
10496 /* Get the first SGE entry from the non-embedded DMA memory */ 10655 /* Get the first SGE entry from the non-embedded DMA memory */
10497 if (unlikely(!mbox->sge_array)) {
10498 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10499 "2525 Failed to get the non-embedded SGE "
10500 "virtual address\n");
10501 lpfc_sli4_mbox_cmd_free(phba, mbox);
10502 return -ENOMEM;
10503 }
10504 viraddr = mbox->sge_array->addr[0]; 10656 viraddr = mbox->sge_array->addr[0];
10505 10657
10506 /* Set up the SGL pages in the non-embedded DMA pages */ 10658 /* Set up the SGL pages in the non-embedded DMA pages */
@@ -10524,8 +10676,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10524 sgl_pg_pairs++; 10676 sgl_pg_pairs++;
10525 } 10677 }
10526 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 10678 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10527 pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs; 10679 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
10528 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10529 /* Perform endian conversion if necessary */ 10680 /* Perform endian conversion if necessary */
10530 sgl->word0 = cpu_to_le32(sgl->word0); 10681 sgl->word0 = cpu_to_le32(sgl->word0);
10531 10682
@@ -10607,15 +10758,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10607 lpfc_sli4_mbox_cmd_free(phba, mbox); 10758 lpfc_sli4_mbox_cmd_free(phba, mbox);
10608 return -ENOMEM; 10759 return -ENOMEM;
10609 } 10760 }
10610
10611 /* Get the first SGE entry from the non-embedded DMA memory */ 10761 /* Get the first SGE entry from the non-embedded DMA memory */
10612 if (unlikely(!mbox->sge_array)) {
10613 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10614 "2565 Failed to get the non-embedded SGE "
10615 "virtual address\n");
10616 lpfc_sli4_mbox_cmd_free(phba, mbox);
10617 return -ENOMEM;
10618 }
10619 viraddr = mbox->sge_array->addr[0]; 10762 viraddr = mbox->sge_array->addr[0];
10620 10763
10621 /* Set up the SGL pages in the non-embedded DMA pages */ 10764 /* Set up the SGL pages in the non-embedded DMA pages */
@@ -10802,6 +10945,105 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10802} 10945}
10803 10946
10804/** 10947/**
10948 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
10949 * @vport: The vport to work on.
10950 *
10951 * This function updates the receive sequence time stamp for this vport. The
10952 * receive sequence time stamp indicates the time that the last frame of the
10953 * the sequence that has been idle for the longest amount of time was received.
10954 * the driver uses this time stamp to indicate if any received sequences have
10955 * timed out.
10956 **/
10957void
10958lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
10959{
10960 struct lpfc_dmabuf *h_buf;
10961 struct hbq_dmabuf *dmabuf = NULL;
10962
10963 /* get the oldest sequence on the rcv list */
10964 h_buf = list_get_first(&vport->rcv_buffer_list,
10965 struct lpfc_dmabuf, list);
10966 if (!h_buf)
10967 return;
10968 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10969 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
10970}
10971
10972/**
10973 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
10974 * @vport: The vport that the received sequences were sent to.
10975 *
10976 * This function cleans up all outstanding received sequences. This is called
10977 * by the driver when a link event or user action invalidates all the received
10978 * sequences.
10979 **/
10980void
10981lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
10982{
10983 struct lpfc_dmabuf *h_buf, *hnext;
10984 struct lpfc_dmabuf *d_buf, *dnext;
10985 struct hbq_dmabuf *dmabuf = NULL;
10986
10987 /* start with the oldest sequence on the rcv list */
10988 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
10989 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10990 list_del_init(&dmabuf->hbuf.list);
10991 list_for_each_entry_safe(d_buf, dnext,
10992 &dmabuf->dbuf.list, list) {
10993 list_del_init(&d_buf->list);
10994 lpfc_in_buf_free(vport->phba, d_buf);
10995 }
10996 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
10997 }
10998}
10999
11000/**
11001 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
11002 * @vport: The vport that the received sequences were sent to.
11003 *
11004 * This function determines whether any received sequences have timed out by
11005 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 11006 * indicates that there is at least one timed out sequence, this routine will
 11007 * go through the received sequences one at a time, from most inactive to most
 11008 * active, to determine which ones need to be cleaned up. Once it has determined
 11009 * that a sequence needs to be cleaned up, it will simply free up the resources
11010 * without sending an abort.
11011 **/
11012void
11013lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
11014{
11015 struct lpfc_dmabuf *h_buf, *hnext;
11016 struct lpfc_dmabuf *d_buf, *dnext;
11017 struct hbq_dmabuf *dmabuf = NULL;
11018 unsigned long timeout;
11019 int abort_count = 0;
11020
11021 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
11022 vport->rcv_buffer_time_stamp);
11023 if (list_empty(&vport->rcv_buffer_list) ||
11024 time_before(jiffies, timeout))
11025 return;
11026 /* start with the oldest sequence on the rcv list */
11027 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
11028 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
11029 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
11030 dmabuf->time_stamp);
11031 if (time_before(jiffies, timeout))
11032 break;
11033 abort_count++;
11034 list_del_init(&dmabuf->hbuf.list);
11035 list_for_each_entry_safe(d_buf, dnext,
11036 &dmabuf->dbuf.list, list) {
11037 list_del_init(&d_buf->list);
11038 lpfc_in_buf_free(vport->phba, d_buf);
11039 }
11040 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
11041 }
11042 if (abort_count)
11043 lpfc_update_rcv_time_stamp(vport);
11044}
11045
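The timeout test above is plain jiffies arithmetic: a sequence is stale once E_D_TOV (fc_edtov, kept in milliseconds) has elapsed since its time_stamp. The same check, isolated in a hypothetical helper:

        /* Returns true when @dmabuf has been idle longer than E_D_TOV */
        static bool lpfc_seq_timed_out(struct lpfc_vport *vport,
                                       struct hbq_dmabuf *dmabuf)
        {
                unsigned long deadline = dmabuf->time_stamp +
                                         msecs_to_jiffies(vport->phba->fc_edtov);

                return !time_before(jiffies, deadline);
        }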
11046/**
10805 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 11047 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
10806 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 11048 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10807 * 11049 *
@@ -10823,6 +11065,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10823 struct hbq_dmabuf *seq_dmabuf = NULL; 11065 struct hbq_dmabuf *seq_dmabuf = NULL;
10824 struct hbq_dmabuf *temp_dmabuf = NULL; 11066 struct hbq_dmabuf *temp_dmabuf = NULL;
10825 11067
11068 INIT_LIST_HEAD(&dmabuf->dbuf.list);
11069 dmabuf->time_stamp = jiffies;
10826 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11070 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10827 /* Use the hdr_buf to find the sequence that this frame belongs to */ 11071 /* Use the hdr_buf to find the sequence that this frame belongs to */
10828 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 11072 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
@@ -10841,13 +11085,27 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10841 * Queue the buffer on the vport's rcv_buffer_list. 11085 * Queue the buffer on the vport's rcv_buffer_list.
10842 */ 11086 */
10843 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 11087 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
11088 lpfc_update_rcv_time_stamp(vport);
10844 return dmabuf; 11089 return dmabuf;
10845 } 11090 }
10846 temp_hdr = seq_dmabuf->hbuf.virt; 11091 temp_hdr = seq_dmabuf->hbuf.virt;
10847 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { 11092 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
10848 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list); 11093 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
11094 list_del_init(&seq_dmabuf->hbuf.list);
11095 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
11096 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
11097 lpfc_update_rcv_time_stamp(vport);
10849 return dmabuf; 11098 return dmabuf;
10850 } 11099 }
11100 /* move this sequence to the tail to indicate a young sequence */
11101 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
11102 seq_dmabuf->time_stamp = jiffies;
11103 lpfc_update_rcv_time_stamp(vport);
11104 if (list_empty(&seq_dmabuf->dbuf.list)) {
11105 temp_hdr = dmabuf->hbuf.virt;
11106 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
11107 return seq_dmabuf;
11108 }
10851 /* find the correct place in the sequence to insert this frame */ 11109 /* find the correct place in the sequence to insert this frame */
10852 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 11110 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10853 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11111 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -10856,7 +11114,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10856 * If the frame's sequence count is greater than the frame on 11114 * If the frame's sequence count is greater than the frame on
10857 * the list then insert the frame right after this frame 11115 * the list then insert the frame right after this frame
10858 */ 11116 */
10859 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) { 11117 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
11118 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
10860 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 11119 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10861 return seq_dmabuf; 11120 return seq_dmabuf;
10862 } 11121 }
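The be16_to_cpu() conversions added above matter because fh_seq_cnt travels big-endian on the wire, so raw comparisons are only correct on big-endian hosts. A small illustration:

        static void seq_cnt_endianness_example(void)
        {
                __be16 a = cpu_to_be16(1);      /* bytes on the wire: 0x00 0x01 */
                __be16 b = cpu_to_be16(256);    /* bytes on the wire: 0x01 0x00 */

                /*
                 * On a little-endian CPU the raw values read back as 0x0100
                 * and 0x0001, so a raw compare claims 1 > 256.
                 */
                if (be16_to_cpu(a) < be16_to_cpu(b))
                        ;       /* always true: 1 < 256 on every architecture */
        }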
@@ -10865,6 +11124,210 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10865} 11124}
10866 11125
10867/** 11126/**
11127 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 11128 * @vport: pointer to a virtual port
11129 * @dmabuf: pointer to a dmabuf that describes the FC sequence
11130 *
 11131 * This function tries to abort the partially assembled sequence described by
 11132 * the information from the basic abort @dmabuf. It checks whether such a
 11133 * partially assembled sequence is held by the driver. If so, it shall free all
11134 * the frames from the partially assembled sequence.
11135 *
11136 * Return
 11137 * true -- if a matching partially assembled sequence is present and all
 11138 * of its frames have been freed with the sequence;
 11139 * false -- if there is no matching partially assembled sequence present, so
 11140 * nothing was aborted in the lower layer driver
11141 **/
11142static bool
11143lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
11144 struct hbq_dmabuf *dmabuf)
11145{
11146 struct fc_frame_header *new_hdr;
11147 struct fc_frame_header *temp_hdr;
11148 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
11149 struct hbq_dmabuf *seq_dmabuf = NULL;
11150
11151 /* Use the hdr_buf to find the sequence that matches this frame */
11152 INIT_LIST_HEAD(&dmabuf->dbuf.list);
11153 INIT_LIST_HEAD(&dmabuf->hbuf.list);
11154 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11155 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
11156 temp_hdr = (struct fc_frame_header *)h_buf->virt;
11157 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
11158 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
11159 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
11160 continue;
11161 /* found a pending sequence that matches this frame */
11162 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
11163 break;
11164 }
11165
11166 /* Free up all the frames from the partially assembled sequence */
11167 if (seq_dmabuf) {
11168 list_for_each_entry_safe(d_buf, n_buf,
11169 &seq_dmabuf->dbuf.list, list) {
11170 list_del_init(&d_buf->list);
11171 lpfc_in_buf_free(vport->phba, d_buf);
11172 }
11173 return true;
11174 }
11175 return false;
11176}
11177
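The matching loop above keys a held sequence on the (SEQ_ID, OX_ID, S_ID) triple. The predicate, pulled out into a hypothetical helper for clarity:

        static bool lpfc_seq_hdr_match(const struct fc_frame_header *a,
                                       const struct fc_frame_header *b)
        {
                return a->fh_seq_id == b->fh_seq_id &&
                       a->fh_ox_id == b->fh_ox_id &&
                       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);    /* 3-byte FC S_ID */
        }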
11178/**
11179 * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
11180 * @phba: Pointer to HBA context object.
11181 * @cmd_iocbq: pointer to the command iocbq structure.
11182 * @rsp_iocbq: pointer to the response iocbq structure.
11183 *
11184 * This function handles the sequence abort accept iocb command complete
11185 * event. It properly releases the memory allocated to the sequence abort
11186 * accept iocb.
11187 **/
11188static void
11189lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
11190 struct lpfc_iocbq *cmd_iocbq,
11191 struct lpfc_iocbq *rsp_iocbq)
11192{
11193 if (cmd_iocbq)
11194 lpfc_sli_release_iocbq(phba, cmd_iocbq);
11195}
11196
11197/**
11198 * lpfc_sli4_seq_abort_acc - Accept sequence abort
11199 * @phba: Pointer to HBA context object.
11200 * @fc_hdr: pointer to a FC frame header.
11201 *
 11202 * This function sends a basic accept (BA_ACC) in response to a previous
 11203 * unsolicited sequence abort event, after the sequence handling is aborted.
11204 **/
11205static void
11206lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
11207 struct fc_frame_header *fc_hdr)
11208{
11209 struct lpfc_iocbq *ctiocb = NULL;
11210 struct lpfc_nodelist *ndlp;
11211 uint16_t oxid, rxid;
11212 uint32_t sid, fctl;
11213 IOCB_t *icmd;
11214
11215 if (!lpfc_is_link_up(phba))
11216 return;
11217
11218 sid = sli4_sid_from_fc_hdr(fc_hdr);
11219 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
11220 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
11221
11222 ndlp = lpfc_findnode_did(phba->pport, sid);
11223 if (!ndlp) {
11224 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
11225 "1268 Find ndlp returned NULL for oxid:x%x "
11226 "SID:x%x\n", oxid, sid);
11227 return;
11228 }
11229
11230 /* Allocate buffer for acc iocb */
11231 ctiocb = lpfc_sli_get_iocbq(phba);
11232 if (!ctiocb)
11233 return;
11234
11235 /* Extract the F_CTL field from FC_HDR */
11236 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
11237
11238 icmd = &ctiocb->iocb;
11239 icmd->un.xseq64.bdl.bdeSize = 0;
11240 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
11241 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11242 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
11243 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
11244
11245 /* Fill in the rest of iocb fields */
11246 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
11247 icmd->ulpBdeCount = 0;
11248 icmd->ulpLe = 1;
11249 icmd->ulpClass = CLASS3;
11250 icmd->ulpContext = ndlp->nlp_rpi;
11251
11252 ctiocb->iocb_cmpl = NULL;
11253 ctiocb->vport = phba->pport;
11254 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
11255
11256 if (fctl & FC_FC_EX_CTX) {
11257 /* ABTS sent by responder to CT exchange, construction
11258 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
11259 * field and RX_ID from ABTS for RX_ID field.
11260 */
11261 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
11262 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
11263 ctiocb->sli4_xritag = oxid;
11264 } else {
11265 /* ABTS sent by initiator to CT exchange, construction
 11266 		 * of BA_ACC will need to allocate a new XRI for the
11267 * XRI_TAG and RX_ID fields.
11268 */
11269 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
11270 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
11271 ctiocb->sli4_xritag = NO_XRI;
11272 }
11273 bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);
11274
11275 /* Xmit CT abts accept on exchange <xid> */
11276 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11277 "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
11278 CMD_XMIT_BLS_RSP64_CX, phba->link_state);
11279 lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
11280}
11281
11282/**
11283 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
11284 * @vport: Pointer to the vport on which this sequence was received
11285 * @dmabuf: pointer to a dmabuf that describes the FC sequence
11286 *
11287 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 11288 * receive sequence is only partially assembled by the driver, it shall abort
 11289 * the partially assembled frames for the sequence. Otherwise, if the
 11290 * unsolicited receive sequence has been completely assembled and passed to
 11291 * the Upper Layer Protocol (ULP), it then marks the per-OX_ID status to
 11292 * indicate that the unsolicited sequence has been aborted. After that, it
 11293 * will issue a basic accept (BA_ACC) for the abort.
11294 **/
11295void
11296lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
11297 struct hbq_dmabuf *dmabuf)
11298{
11299 struct lpfc_hba *phba = vport->phba;
11300 struct fc_frame_header fc_hdr;
11301 uint32_t fctl;
11302 bool abts_par;
11303
 11304 	/* Make a copy of fc_hdr before the dmabuf is released */
11305 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
11306 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
11307
11308 if (fctl & FC_FC_EX_CTX) {
11309 /*
11310 * ABTS sent by responder to exchange, just free the buffer
11311 */
11312 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11313 } else {
11314 /*
11315 * ABTS sent by initiator to exchange, need to do cleanup
11316 */
11317 /* Try to abort partially assembled seq */
11318 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
11319
11320 /* Send abort to ULP if partially seq abort failed */
11321 if (abts_par == false)
11322 lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
11323 else
11324 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11325 }
11326 /* Send basic accept (BA_ACC) to the abort requester */
11327 lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
11328}
11329
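FC_FC_EX_CTX is bit 23 of F_CTL, the Exchange Context bit: clear means the frame came from the exchange originator, set means it came from the responder. The test used above reduces to the following (hypothetical helper name):

        static bool abts_from_responder(const struct fc_frame_header *fc_hdr)
        {
                uint32_t fctl = (fc_hdr->fh_f_ctl[0] << 16 |
                                 fc_hdr->fh_f_ctl[1] << 8 |
                                 fc_hdr->fh_f_ctl[2]);  /* F_CTL is a 3-byte field */

                return (fctl & FC_FC_EX_CTX) != 0;
        }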
11330/**
10868 * lpfc_seq_complete - Indicates if a sequence is complete 11331 * lpfc_seq_complete - Indicates if a sequence is complete
10869 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11332 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10870 * 11333 *
@@ -10899,7 +11362,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
10899 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11362 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10900 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11363 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10901 /* If there is a hole in the sequence count then fail. */ 11364 /* If there is a hole in the sequence count then fail. */
10902 if (++seq_count != hdr->fh_seq_cnt) 11365 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
10903 return 0; 11366 return 0;
10904 fctl = (hdr->fh_f_ctl[0] << 16 | 11367 fctl = (hdr->fh_f_ctl[0] << 16 |
10905 hdr->fh_f_ctl[1] << 8 | 11368 hdr->fh_f_ctl[1] << 8 |
@@ -10931,14 +11394,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10931 struct lpfc_iocbq *first_iocbq, *iocbq; 11394 struct lpfc_iocbq *first_iocbq, *iocbq;
10932 struct fc_frame_header *fc_hdr; 11395 struct fc_frame_header *fc_hdr;
10933 uint32_t sid; 11396 uint32_t sid;
11397 struct ulp_bde64 *pbde;
10934 11398
10935 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11399 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10936 /* remove from receive buffer list */ 11400 /* remove from receive buffer list */
10937 list_del_init(&seq_dmabuf->hbuf.list); 11401 list_del_init(&seq_dmabuf->hbuf.list);
11402 lpfc_update_rcv_time_stamp(vport);
10938 /* get the Remote Port's SID */ 11403 /* get the Remote Port's SID */
10939 sid = (fc_hdr->fh_s_id[0] << 16 | 11404 sid = sli4_sid_from_fc_hdr(fc_hdr);
10940 fc_hdr->fh_s_id[1] << 8 |
10941 fc_hdr->fh_s_id[2]);
10942 /* Get an iocbq struct to fill in. */ 11405 /* Get an iocbq struct to fill in. */
10943 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 11406 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10944 if (first_iocbq) { 11407 if (first_iocbq) {
@@ -10957,7 +11420,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10957 LPFC_DATA_BUF_SIZE; 11420 LPFC_DATA_BUF_SIZE;
10958 first_iocbq->iocb.un.rcvels.remoteID = sid; 11421 first_iocbq->iocb.un.rcvels.remoteID = sid;
10959 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11422 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10960 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11423 bf_get(lpfc_rcqe_length,
11424 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10961 } 11425 }
10962 iocbq = first_iocbq; 11426 iocbq = first_iocbq;
10963 /* 11427 /*
@@ -10972,10 +11436,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10972 if (!iocbq->context3) { 11436 if (!iocbq->context3) {
10973 iocbq->context3 = d_buf; 11437 iocbq->context3 = d_buf;
10974 iocbq->iocb.ulpBdeCount++; 11438 iocbq->iocb.ulpBdeCount++;
10975 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = 11439 pbde = (struct ulp_bde64 *)
10976 LPFC_DATA_BUF_SIZE; 11440 &iocbq->iocb.unsli3.sli3Words[4];
11441 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
10977 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11442 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10978 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11443 bf_get(lpfc_rcqe_length,
11444 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10979 } else { 11445 } else {
10980 iocbq = lpfc_sli_get_iocbq(vport->phba); 11446 iocbq = lpfc_sli_get_iocbq(vport->phba);
10981 if (!iocbq) { 11447 if (!iocbq) {
@@ -10994,7 +11460,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10994 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 11460 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10995 LPFC_DATA_BUF_SIZE; 11461 LPFC_DATA_BUF_SIZE;
10996 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11462 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10997 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11463 bf_get(lpfc_rcqe_length,
11464 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10998 iocbq->iocb.un.rcvels.remoteID = sid; 11465 iocbq->iocb.un.rcvels.remoteID = sid;
10999 list_add_tail(&iocbq->list, &first_iocbq->list); 11466 list_add_tail(&iocbq->list, &first_iocbq->list);
11000 } 11467 }
@@ -11002,6 +11469,43 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11002 return first_iocbq; 11469 return first_iocbq;
11003} 11470}
11004 11471
11472static void
11473lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
11474 struct hbq_dmabuf *seq_dmabuf)
11475{
11476 struct fc_frame_header *fc_hdr;
11477 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
11478 struct lpfc_hba *phba = vport->phba;
11479
11480 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11481 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
11482 if (!iocbq) {
11483 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11484 "2707 Ring %d handler: Failed to allocate "
11485 "iocb Rctl x%x Type x%x received\n",
11486 LPFC_ELS_RING,
11487 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11488 return;
11489 }
11490 if (!lpfc_complete_unsol_iocb(phba,
11491 &phba->sli.ring[LPFC_ELS_RING],
11492 iocbq, fc_hdr->fh_r_ctl,
11493 fc_hdr->fh_type))
11494 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11495 "2540 Ring %d handler: unexpected Rctl "
11496 "x%x Type x%x received\n",
11497 LPFC_ELS_RING,
11498 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11499
11500 /* Free iocb created in lpfc_prep_seq */
11501 list_for_each_entry_safe(curr_iocb, next_iocb,
11502 &iocbq->list, list) {
11503 list_del_init(&curr_iocb->list);
11504 lpfc_sli_release_iocbq(phba, curr_iocb);
11505 }
11506 lpfc_sli_release_iocbq(phba, iocbq);
11507}
11508
11005/** 11509/**
11006 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 11510 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
11007 * @phba: Pointer to HBA context object. 11511 * @phba: Pointer to HBA context object.
@@ -11014,67 +11518,48 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11014 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 11518 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
11015 * appropriate receive function when the final frame in a sequence is received. 11519 * appropriate receive function when the final frame in a sequence is received.
11016 **/ 11520 **/
11017int 11521void
11018lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) 11522lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
11523 struct hbq_dmabuf *dmabuf)
11019{ 11524{
11020 LIST_HEAD(cmplq); 11525 struct hbq_dmabuf *seq_dmabuf;
11021 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
11022 struct fc_frame_header *fc_hdr; 11526 struct fc_frame_header *fc_hdr;
11023 struct lpfc_vport *vport; 11527 struct lpfc_vport *vport;
11024 uint32_t fcfi; 11528 uint32_t fcfi;
11025 struct lpfc_iocbq *iocbq;
11026
11027 /* Clear hba flag and get all received buffers into the cmplq */
11028 spin_lock_irq(&phba->hbalock);
11029 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
11030 list_splice_init(&phba->rb_pend_list, &cmplq);
11031 spin_unlock_irq(&phba->hbalock);
11032 11529
11033 /* Process each received buffer */ 11530 /* Process each received buffer */
11034 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) { 11531 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11035 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11532 /* check to see if this a valid type of frame */
11036 /* check to see if this a valid type of frame */ 11533 if (lpfc_fc_frame_check(phba, fc_hdr)) {
11037 if (lpfc_fc_frame_check(phba, fc_hdr)) { 11534 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11038 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11535 return;
11039 continue; 11536 }
11040 } 11537 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
11041 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe); 11538 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
11042 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 11539 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
11043 if (!vport) { 11540 /* throw out the frame */
11044 /* throw out the frame */ 11541 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11045 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11542 return;
11046 continue; 11543 }
11047 } 11544 /* Handle the basic abort sequence (BA_ABTS) event */
11048 /* Link this frame */ 11545 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
11049 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 11546 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
11050 if (!seq_dmabuf) { 11547 return;
11051 /* unable to add frame to vport - throw it out */ 11548 }
11052 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11549
11053 continue; 11550 /* Link this frame */
11054 } 11551 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
11055 /* If not last frame in sequence continue processing frames. */ 11552 if (!seq_dmabuf) {
11056 if (!lpfc_seq_complete(seq_dmabuf)) { 11553 /* unable to add frame to vport - throw it out */
11057 /* 11554 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11058 * When saving off frames post a new one and mark this 11555 return;
11059 * frame to be freed when it is finished. 11556 }
11060 **/ 11557 /* If not last frame in sequence continue processing frames. */
11061 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); 11558 if (!lpfc_seq_complete(seq_dmabuf))
11062 dmabuf->tag = -1; 11559 return;
11063 continue; 11560
11064 } 11561 /* Send the complete sequence to the upper layer protocol */
11065 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11562 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
11066 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
11067 if (!lpfc_complete_unsol_iocb(phba,
11068 &phba->sli.ring[LPFC_ELS_RING],
11069 iocbq, fc_hdr->fh_r_ctl,
11070 fc_hdr->fh_type))
11071 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11072 "2540 Ring %d handler: unexpected Rctl "
11073 "x%x Type x%x received\n",
11074 LPFC_ELS_RING,
11075 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11076 };
11077 return 0;
11078} 11563}
11079 11564
11080/** 11565/**
@@ -11091,7 +11576,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
11091 * sequential. 11576 * sequential.
11092 * 11577 *
11093 * Return codes 11578 * Return codes
11094 * 0 - sucessful 11579 * 0 - successful
11095 * EIO - The mailbox failed to complete successfully. 11580 * EIO - The mailbox failed to complete successfully.
11096 * When this error occurs, the driver is not guaranteed 11581 * When this error occurs, the driver is not guaranteed
11097 * to have any rpi regions posted to the device and 11582 * to have any rpi regions posted to the device and
@@ -11129,7 +11614,7 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
11129 * maps up to 64 rpi context regions. 11614 * maps up to 64 rpi context regions.
11130 * 11615 *
11131 * Return codes 11616 * Return codes
11132 * 0 - sucessful 11617 * 0 - successful
11133 * ENOMEM - No available memory 11618 * ENOMEM - No available memory
11134 * EIO - The mailbox failed to complete successfully. 11619 * EIO - The mailbox failed to complete successfully.
11135 **/ 11620 **/
@@ -11191,7 +11676,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
11191 * PAGE_SIZE modulo 64 rpi context headers. 11676 * PAGE_SIZE modulo 64 rpi context headers.
11192 * 11677 *
11193 * Returns 11678 * Returns
11194 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if sucessful 11679 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11195 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 11680 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
11196 **/ 11681 **/
11197int 11682int
@@ -11334,6 +11819,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11334{ 11819{
11335 LPFC_MBOXQ_t *mboxq; 11820 LPFC_MBOXQ_t *mboxq;
11336 int rc = 0; 11821 int rc = 0;
11822 int retval = MBX_SUCCESS;
11337 uint32_t mbox_tmo; 11823 uint32_t mbox_tmo;
11338 11824
11339 if (vpi == 0) 11825 if (vpi == 0)
@@ -11344,16 +11830,17 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11344 lpfc_init_vpi(phba, mboxq, vpi); 11830 lpfc_init_vpi(phba, mboxq, vpi);
11345 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 11831 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11346 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11832 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11347 if (rc != MBX_TIMEOUT)
11348 mempool_free(mboxq, phba->mbox_mem_pool);
11349 if (rc != MBX_SUCCESS) { 11833 if (rc != MBX_SUCCESS) {
11350 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11834 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11351 "2022 INIT VPI Mailbox failed " 11835 "2022 INIT VPI Mailbox failed "
11352 "status %d, mbxStatus x%x\n", rc, 11836 "status %d, mbxStatus x%x\n", rc,
11353 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 11837 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11354 rc = -EIO; 11838 retval = -EIO;
11355 } 11839 }
11356 return rc; 11840 if (rc != MBX_TIMEOUT)
11841 mempool_free(mboxq, phba->mbox_mem_pool);
11842
11843 return retval;
11357} 11844}
11358 11845
11359/** 11846/**
@@ -11438,13 +11925,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11438 */ 11925 */
11439 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 11926 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11440 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 11927 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11441 if (unlikely(!mboxq->sge_array)) {
11442 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11443 "2526 Failed to get the non-embedded SGE "
11444 "virtual address\n");
11445 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11446 return -ENOMEM;
11447 }
11448 virt_addr = mboxq->sge_array->addr[0]; 11928 virt_addr = mboxq->sge_array->addr[0];
11449 /* 11929 /*
11450 * Configure the FCF record for FCFI 0. This is the driver's 11930 * Configure the FCF record for FCFI 0. This is the driver's
@@ -11517,24 +11997,22 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11517} 11997}
11518 11998
11519/** 11999/**
11520 * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. 12000 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
11521 * @phba: pointer to lpfc hba data structure. 12001 * @phba: pointer to lpfc hba data structure.
11522 * @fcf_index: FCF table entry offset. 12002 * @fcf_index: FCF table entry offset.
11523 * 12003 *
11524 * This routine is invoked to read up to @fcf_num of FCF record from the 12004 * This routine is invoked to scan the entire FCF table by reading FCF
 11525 * device starting with the given @fcf_index. 12005 * records and processing them one at a time, starting from @fcf_index,
12006 * for initial FCF discovery or fast FCF failover rediscovery.
12007 *
 12008 * Return 0 if the mailbox command is submitted successfully, non-zero
12009 * otherwise.
11526 **/ 12010 **/
11527int 12011int
11528lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) 12012lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
11529{ 12013{
11530 int rc = 0, error; 12014 int rc = 0, error;
11531 LPFC_MBOXQ_t *mboxq; 12015 LPFC_MBOXQ_t *mboxq;
11532 void *virt_addr;
11533 dma_addr_t phys_addr;
11534 uint8_t *bytep;
11535 struct lpfc_mbx_sge sge;
11536 uint32_t alloc_len, req_len;
11537 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11538 12016
11539 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 12017 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
11540 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12018 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -11542,59 +12020,347 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11543 "2000 Failed to allocate mbox for " 12021 "2000 Failed to allocate mbox for "
11544 "READ_FCF cmd\n"); 12022 "READ_FCF cmd\n");
11545 return -ENOMEM; 12023 error = -ENOMEM;
12024 goto fail_fcf_scan;
11546 } 12025 }
12026 /* Construct the read FCF record mailbox command */
12027 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12028 if (rc) {
12029 error = -EINVAL;
12030 goto fail_fcf_scan;
12031 }
12032 /* Issue the mailbox command asynchronously */
12033 mboxq->vport = phba->pport;
12034 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
12035 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12036 if (rc == MBX_NOT_FINISHED)
12037 error = -EIO;
12038 else {
12039 spin_lock_irq(&phba->hbalock);
12040 phba->hba_flag |= FCF_DISC_INPROGRESS;
12041 spin_unlock_irq(&phba->hbalock);
12042 /* Reset FCF round robin index bmask for new scan */
12043 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
12044 memset(phba->fcf.fcf_rr_bmask, 0,
12045 sizeof(*phba->fcf.fcf_rr_bmask));
12046 error = 0;
12047 }
12048fail_fcf_scan:
12049 if (error) {
12050 if (mboxq)
12051 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12052 /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
12053 spin_lock_irq(&phba->hbalock);
12054 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
12055 spin_unlock_irq(&phba->hbalock);
12056 }
12057 return error;
12058}
11547 12059
11548 req_len = sizeof(struct fcf_record) + 12060/**
11549 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); 12061 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
12062 * @phba: pointer to lpfc hba data structure.
12063 * @fcf_index: FCF table entry offset.
12064 *
12065 * This routine is invoked to read an FCF record indicated by @fcf_index
12066 * and to use it for FLOGI round robin FCF failover.
12067 *
 12068 * Return 0 if the mailbox command is submitted successfully, non-zero
12069 * otherwise.
12070 **/
12071int
12072lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12073{
12074 int rc = 0, error;
12075 LPFC_MBOXQ_t *mboxq;
11550 12076
11551 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ 12077 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11552 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 12078 if (!mboxq) {
11553 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, 12079 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
11554 LPFC_SLI4_MBX_NEMBED); 12080 "2763 Failed to allocate mbox for "
12081 "READ_FCF cmd\n");
12082 error = -ENOMEM;
12083 goto fail_fcf_read;
12084 }
12085 /* Construct the read FCF record mailbox command */
12086 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12087 if (rc) {
12088 error = -EINVAL;
12089 goto fail_fcf_read;
12090 }
12091 /* Issue the mailbox command asynchronously */
12092 mboxq->vport = phba->pport;
12093 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
12094 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12095 if (rc == MBX_NOT_FINISHED)
12096 error = -EIO;
12097 else
12098 error = 0;
11555 12099
11556 if (alloc_len < req_len) { 12100fail_fcf_read:
11557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12101 if (error && mboxq)
11558 "0291 Allocated DMA memory size (x%x) is "
11559 "less than the requested DMA memory "
11560 "size (x%x)\n", alloc_len, req_len);
11561 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12102 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11562 return -ENOMEM; 12103 return error;
12104}
12105
12106/**
12107 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
12108 * @phba: pointer to lpfc hba data structure.
12109 * @fcf_index: FCF table entry offset.
12110 *
12111 * This routine is invoked to read an FCF record indicated by @fcf_index to
 12112 * determine whether it's eligible for the FLOGI round robin failover list.
12113 *
 12114 * Return 0 if the mailbox command is submitted successfully, non-zero
 12115 * otherwise.
12116 **/
12117int
12118lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12119{
12120 int rc = 0, error;
12121 LPFC_MBOXQ_t *mboxq;
12122
12123 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12124 if (!mboxq) {
12125 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
12126 "2758 Failed to allocate mbox for "
12127 "READ_FCF cmd\n");
12128 error = -ENOMEM;
12129 goto fail_fcf_read;
12130 }
12131 /* Construct the read FCF record mailbox command */
12132 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12133 if (rc) {
12134 error = -EINVAL;
12135 goto fail_fcf_read;
11563 } 12136 }
12137 /* Issue the mailbox command asynchronously */
12138 mboxq->vport = phba->pport;
12139 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
12140 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12141 if (rc == MBX_NOT_FINISHED)
12142 error = -EIO;
12143 else
12144 error = 0;
11564 12145
11565 /* Get the first SGE entry from the non-embedded DMA memory. This 12146fail_fcf_read:
11566 * routine only uses a single SGE. 12147 if (error && mboxq)
11567 */
11568 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11569 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11570 if (unlikely(!mboxq->sge_array)) {
11571 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11572 "2527 Failed to get the non-embedded SGE "
11573 "virtual address\n");
11574 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12148 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12149 return error;
12150}
12151
12152/**
12153 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
12154 * @phba: pointer to lpfc hba data structure.
12155 *
 12156 * This routine gets the next eligible FCF record index in a round
 12157 * robin fashion. If the next eligible FCF record index equals the
 12158 * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 12159 * is returned; otherwise, the next eligible FCF record's index is
 12160 * returned.
12161 **/
12162uint16_t
12163lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
12164{
12165 uint16_t next_fcf_index;
12166
12167 /* Search from the currently registered FCF index */
12168 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12169 LPFC_SLI4_FCF_TBL_INDX_MAX,
12170 phba->fcf.current_rec.fcf_indx);
12171 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
12172 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
12173 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12174 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
12175 /* Round robin failover stop condition */
12176 if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
12177 return LPFC_FCOE_FCF_NEXT_NONE;
12178
12179 return next_fcf_index;
12180}
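
lpfc_sli4_fcf_rr_next_index_get() is a circular find_next_bit(): search upward from the current index, wrap to bit 0 at the end of the bmask, and stop once the walk returns to the index that began the failover. A user-space sketch of the same idea, assuming a table of at most 32 entries so a plain uint32_t stands in for the bmask (all names here are illustrative, not driver API):

#include <stdint.h>

static uint16_t rr_next(uint32_t bmask, uint16_t max, uint16_t cur,
			uint16_t init)
{
	uint16_t i, idx;

	for (i = 0; i < max; i++) {
		idx = (uint16_t)((cur + i) % max); /* wrap around table */
		if (bmask & (1u << idx)) {
			if (idx == init)	/* walked full circle */
				return 0xFFFF;	/* LPFC_FCOE_FCF_NEXT_NONE */
			return idx;
		}
	}
	return 0xFFFF;				/* no eligible entries */
}
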
12181
12182/**
12183 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
12184 * @phba: pointer to lpfc hba data structure.
12185 *
 12186 * This routine sets the FCF record index into the eligible bmask for
 12187 * round robin failover search. It checks to make sure that the index
 12188 * does not go beyond the range of the driver-allocated bmask dimension
 12189 * before setting the bit.
12190 *
 12191 * Returns 0 if the index bit is successfully set; otherwise, it returns
 12192 * -EINVAL.
12193 **/
12194int
12195lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
12196{
12197 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12198 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12199 "2610 HBA FCF index reached driver's "
12200 "book keeping dimension: fcf_index:%d, "
12201 "driver_bmask_max:%d\n",
12202 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12203 return -EINVAL;
12204 }
12205 /* Set the eligible FCF record index bmask */
12206 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12207
12208 return 0;
12209}
12210
12211/**
 12212 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
12213 * @phba: pointer to lpfc hba data structure.
12214 *
12215 * This routine clears the FCF record index from the eligible bmask for
12216 * round robin failover search. It checks to make sure that the index
 12217 * does not go beyond the range of the driver-allocated bmask dimension
12218 * before clearing the bit.
12219 **/
12220void
12221lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
12222{
12223 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12224 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12225 "2762 HBA FCF index goes beyond driver's "
12226 "book keeping dimension: fcf_index:%d, "
12227 "driver_bmask_max:%d\n",
12228 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12229 return;
12230 }
12231 /* Clear the eligible FCF record index bmask */
12232 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12233}
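
The set/clear pair above is the sole mutator of the eligibility bmask. One plausible calling pattern, sketched here as an assumption rather than lifted from the driver: mark a record eligible when its READ_FCF completion finds it usable, and drop it if FLOGI through it later fails.

	/* Sketch only: fcf_index would come from a READ_FCF completion. */
	if (lpfc_sli4_fcf_rr_index_set(phba, fcf_index))
		return;	/* index beyond the bmask dimension, skip record */

	/* Later, if FLOGI through this FCF fails, make it ineligible so
	 * the round robin walk skips it on the next pass. */
	lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
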
12234
12235/**
12236 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
12237 * @phba: pointer to lpfc hba data structure.
12238 *
12239 * This routine is the completion routine for the rediscover FCF table mailbox
 12240 * command. On mailbox failure it falls back to the appropriate failover
 12241 * path; on success it starts the FCF rediscovery wait timer.
12242 **/
12243void
12244lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
12245{
12246 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12247 uint32_t shdr_status, shdr_add_status;
12248
12249 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
12250
12251 shdr_status = bf_get(lpfc_mbox_hdr_status,
12252 &redisc_fcf->header.cfg_shdr.response);
12253 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12254 &redisc_fcf->header.cfg_shdr.response);
12255 if (shdr_status || shdr_add_status) {
12256 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12257 "2746 Requesting for FCF rediscovery failed "
12258 "status x%x add_status x%x\n",
12259 shdr_status, shdr_add_status);
12260 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
12261 spin_lock_irq(&phba->hbalock);
12262 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
12263 spin_unlock_irq(&phba->hbalock);
12264 /*
12265 * CVL event triggered FCF rediscover request failed,
12266 * last resort to re-try current registered FCF entry.
12267 */
12268 lpfc_retry_pport_discovery(phba);
12269 } else {
12270 spin_lock_irq(&phba->hbalock);
12271 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
12272 spin_unlock_irq(&phba->hbalock);
12273 /*
12274 * DEAD FCF event triggered FCF rediscover request
12275 * failed, last resort to fail over as a link down
12276 * to FCF registration.
12277 */
12278 lpfc_sli4_fcf_dead_failthrough(phba);
12279 }
12280 } else {
12281 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12282 "2775 Start FCF rediscovery quiescent period "
 12283 "wait timer before scanning FCF table\n");
12284 /*
 12285 * Start the FCF rediscovery wait timer for the pending FCF
 12286 * before rescanning the FCF record table.
12287 */
12288 lpfc_fcf_redisc_wait_start_timer(phba);
12289 }
12290
12291 mempool_free(mbox, phba->mbox_mem_pool);
12292}
12293
12294/**
 12295 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
12296 * @phba: pointer to lpfc hba data structure.
12297 *
 12298 * This routine is invoked to request rediscovery of the entire FCF table
12299 * by the port.
12300 **/
12301int
12302lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
12303{
12304 LPFC_MBOXQ_t *mbox;
12305 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12306 int rc, length;
12307
12308 /* Cancel retry delay timers to all vports before FCF rediscover */
12309 lpfc_cancel_all_vport_retry_delay_timer(phba);
12310
12311 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12312 if (!mbox) {
12313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12314 "2745 Failed to allocate mbox for "
12315 "requesting FCF rediscover.\n");
11575 return -ENOMEM; 12316 return -ENOMEM;
11576 } 12317 }
11577 virt_addr = mboxq->sge_array->addr[0];
11578 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11579 12318
11580 /* Set up command fields */ 12319 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
11581 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); 12320 sizeof(struct lpfc_sli4_cfg_mhdr));
11582 /* Perform necessary endian conversion */ 12321 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
11583 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 12322 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
11584 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); 12323 length, LPFC_SLI4_MBX_EMBED);
11585 mboxq->vport = phba->pport; 12324
11586 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; 12325 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
11587 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12326 /* Set count to 0 for invalidating the entire FCF database */
12327 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
12328
12329 /* Issue the mailbox command asynchronously */
12330 mbox->vport = phba->pport;
12331 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
12332 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
12333
11588 if (rc == MBX_NOT_FINISHED) { 12334 if (rc == MBX_NOT_FINISHED) {
11589 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12335 mempool_free(mbox, phba->mbox_mem_pool);
11590 error = -EIO; 12336 return -EIO;
11591 } else {
11592 spin_lock_irq(&phba->hbalock);
11593 phba->hba_flag |= FCF_DISC_INPROGRESS;
11594 spin_unlock_irq(&phba->hbalock);
11595 error = 0;
11596 } 12337 }
11597 return error; 12338 return 0;
12339}
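
On success the completion handler above arms the quiescent-period wait timer before the table is rescanned; on MBX_NOT_FINISHED the caller gets -EIO and must choose a fallback itself. A hedged caller sketch, not the driver's exact sequence:

	if (lpfc_sli4_redisc_fcf_table(phba))
		/* Could not even ask the port to rediscover; fall back as
		 * if the FCF died, via the last-resort routine below. */
		lpfc_sli4_fcf_dead_failthrough(phba);
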
12340
12341/**
12342 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
12343 * @phba: pointer to lpfc hba data structure.
12344 *
 12345 * This function is the last-resort failover routine for the FCF DEAD
 12346 * event, used when the driver has failed to perform fast FCF failover.
12347 **/
12348void
12349lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
12350{
12351 uint32_t link_state;
12352
12353 /*
 12354 * As a last resort, FCF DEAD event failover is treated as a
 12355 * link down, but save the link state first because we don't want
 12356 * it changed to Link Down unless it is already down.
12357 */
12358 link_state = phba->link_state;
12359 lpfc_linkdown(phba);
12360 phba->link_state = link_state;
12361
12362 /* Unregister FCF if no devices connected to it */
12363 lpfc_unregister_unused_fcf(phba);
11598} 12364}
11599 12365
11600/** 12366/**
@@ -11725,3 +12491,48 @@ out:
11725 kfree(rgn23_data); 12491 kfree(rgn23_data);
11726 return; 12492 return;
11727} 12493}
12494
12495/**
12496 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
12497 * @vport: pointer to vport data structure.
12498 *
 12499 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 12500 * and REG_VPI mailbox commands associated with the vport. This function
 12501 * is called when the driver wants to restart discovery of the vport due to
 12502 * a Clear Virtual Link event.
12503 **/
12504void
12505lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12506{
12507 struct lpfc_hba *phba = vport->phba;
12508 LPFC_MBOXQ_t *mb, *nextmb;
12509 struct lpfc_dmabuf *mp;
12510
12511 spin_lock_irq(&phba->hbalock);
12512 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
12513 if (mb->vport != vport)
12514 continue;
12515
12516 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
12517 (mb->u.mb.mbxCommand != MBX_REG_VPI))
12518 continue;
12519
12520 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12521 mp = (struct lpfc_dmabuf *) (mb->context1);
12522 if (mp) {
12523 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
12524 kfree(mp);
12525 }
12526 }
12527 list_del(&mb->list);
12528 mempool_free(mb, phba->mbox_mem_pool);
12529 }
12530 mb = phba->sli.mbox_active;
12531 if (mb && (mb->vport == vport)) {
12532 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
12533 (mb->u.mb.mbxCommand == MBX_REG_VPI))
12534 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12535 }
12536 spin_unlock_irq(&phba->hbalock);
12537}
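
The walk uses list_for_each_entry_safe() because entries are unlinked mid-iteration; the _safe variant caches the next node before the loop body runs. A generic sketch of the pattern, with struct foo, my_list and match() as placeholders:

	struct foo *cur, *next;

	list_for_each_entry_safe(cur, next, &my_list, list) {
		if (!match(cur))
			continue;
		list_del(&cur->list);	/* safe: 'next' already saved */
		kfree(cur);
	}
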
12538
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 3c53316cf6d0..b4a639c47616 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,14 +29,17 @@ typedef enum _lpfc_ctx_cmd {
29 LPFC_CTX_HOST 29 LPFC_CTX_HOST
30} lpfc_ctx_cmd; 30} lpfc_ctx_cmd;
31 31
32/* This structure is used to carry the needed response IOCB states */ 32struct lpfc_cq_event {
33struct lpfc_sli4_rspiocb_info { 33 struct list_head list;
34 uint8_t hw_status; 34 union {
35 uint8_t bfield; 35 struct lpfc_mcqe mcqe_cmpl;
36#define LPFC_XB 0x1 36 struct lpfc_acqe_link acqe_link;
37#define LPFC_PV 0x2 37 struct lpfc_acqe_fcoe acqe_fcoe;
38 uint8_t priority; 38 struct lpfc_acqe_dcbx acqe_dcbx;
39 uint8_t reserved; 39 struct lpfc_rcqe rcqe_cmpl;
40 struct sli4_wcqe_xri_aborted wcqe_axri;
41 struct lpfc_wcqe_complete wcqe_cmpl;
42 } cqe;
40}; 43};
41 44
42/* This structure is used to handle IOCB requests / responses */ 45/* This structure is used to handle IOCB requests / responses */
@@ -46,21 +49,27 @@ struct lpfc_iocbq {
46 struct list_head clist; 49 struct list_head clist;
47 uint16_t iotag; /* pre-assigned IO tag */ 50 uint16_t iotag; /* pre-assigned IO tag */
48 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 51 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
52 struct lpfc_cq_event cq_event;
49 53
50 IOCB_t iocb; /* IOCB cmd */ 54 IOCB_t iocb; /* IOCB cmd */
51 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 55 uint8_t retry; /* retry counter for IOCB cmd - if needed */
52 uint8_t iocb_flag; 56 uint16_t iocb_flag;
53#define LPFC_IO_LIBDFC 1 /* libdfc iocb */ 57#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
54#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ 58#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
55#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ 59#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
56#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 60#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
57#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ 61#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
58#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
59#define LPFC_FIP_ELS 0x40 63#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
64#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
65#define DSS_SECURITY_OP 0x100 /* security IO */
66
67#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
68#define LPFC_FIP_ELS_ID_SHIFT 14
60 69
61 uint8_t abort_count;
62 uint8_t rsvd2; 70 uint8_t rsvd2;
63 uint32_t drvrTimeout; /* driver timeout in seconds */ 71 uint32_t drvrTimeout; /* driver timeout in seconds */
72 uint32_t fcp_wqidx; /* index to FCP work queue */
64 struct lpfc_vport *vport;/* virtual port pointer */ 73 struct lpfc_vport *vport;/* virtual port pointer */
65 void *context1; /* caller context information */ 74 void *context1; /* caller context information */
66 void *context2; /* caller context information */ 75 void *context2; /* caller context information */
@@ -76,7 +85,6 @@ struct lpfc_iocbq {
76 struct lpfc_iocbq *); 85 struct lpfc_iocbq *);
77 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 86 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
78 struct lpfc_iocbq *); 87 struct lpfc_iocbq *);
79 struct lpfc_sli4_rspiocb_info sli4_info;
80}; 88};
81 89
82#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ 90#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -110,7 +118,7 @@ typedef struct lpfcMboxq {
110 return */ 118 return */
111#define MBX_NOWAIT 2 /* issue command then return immediately */ 119#define MBX_NOWAIT 2 /* issue command then return immediately */
112 120
113#define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per 121#define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per
114 ring */ 122 ring */
115#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */ 123#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */
116 124
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index b5f4ba1a5c27..4a35e7b9bc5b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -22,13 +22,17 @@
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32 23#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10 24#define LPFC_RPI_LOW_WATER_MARK 10
25
26/* Amount of time in seconds for waiting FCF rediscovery to complete */
27#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
28
25/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ 29/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
26#define LPFC_NEMBED_MBOX_SGL_CNT 254 30#define LPFC_NEMBED_MBOX_SGL_CNT 254
27 31
28/* Multi-queue arrangement for fast-path FCP work queues */ 32/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8 33#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1 34#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1 35#define LPFC_FP_EQN_DEF 4
32#define LPFC_FP_EQN_MIN 1 36#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) 37#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34 38
@@ -58,6 +62,16 @@
58#define LPFC_FCOE_FKA_ADV_PER 0 62#define LPFC_FCOE_FKA_ADV_PER 0
59#define LPFC_FCOE_FIP_PRIORITY 0x80 63#define LPFC_FCOE_FIP_PRIORITY 0x80
60 64
65#define sli4_sid_from_fc_hdr(fc_hdr) \
66 ((fc_hdr)->fh_s_id[0] << 16 | \
67 (fc_hdr)->fh_s_id[1] << 8 | \
68 (fc_hdr)->fh_s_id[2])
69
70#define sli4_fctl_from_fc_hdr(fc_hdr) \
71 ((fc_hdr)->fh_f_ctl[0] << 16 | \
72 (fc_hdr)->fh_f_ctl[1] << 8 | \
73 (fc_hdr)->fh_f_ctl[2])
74
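
The two helpers fold 24-bit FC header fields, stored as three big-endian bytes, into host integers. A usage sketch on a received frame, assuming (as elsewhere in the driver) that the header sits at the start of an hbq_dmabuf payload:

	struct fc_frame_header *fc_hdr = hbq_buf->dbuf.virt;
	uint32_t sid  = sli4_sid_from_fc_hdr(fc_hdr);  /* 24-bit S_ID */
	uint32_t fctl = sli4_fctl_from_fc_hdr(fc_hdr); /* 24-bit F_CTL */
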
61enum lpfc_sli4_queue_type { 75enum lpfc_sli4_queue_type {
62 LPFC_EQ, 76 LPFC_EQ,
63 LPFC_GCQ, 77 LPFC_GCQ,
@@ -110,44 +124,56 @@ struct lpfc_queue {
110 union sli4_qe qe[1]; /* array to index entries (must be last) */ 124 union sli4_qe qe[1]; /* array to index entries (must be last) */
111}; 125};
112 126
113struct lpfc_cq_event {
114 struct list_head list;
115 union {
116 struct lpfc_mcqe mcqe_cmpl;
117 struct lpfc_acqe_link acqe_link;
118 struct lpfc_acqe_fcoe acqe_fcoe;
119 struct lpfc_acqe_dcbx acqe_dcbx;
120 struct lpfc_rcqe rcqe_cmpl;
121 struct sli4_wcqe_xri_aborted wcqe_axri;
122 } cqe;
123};
124
125struct lpfc_sli4_link { 127struct lpfc_sli4_link {
126 uint8_t speed; 128 uint8_t speed;
127 uint8_t duplex; 129 uint8_t duplex;
128 uint8_t status; 130 uint8_t status;
129 uint8_t physical; 131 uint8_t physical;
130 uint8_t fault; 132 uint8_t fault;
133 uint16_t logical_speed;
131}; 134};
132 135
133struct lpfc_fcf { 136struct lpfc_fcf_rec {
134 uint8_t fabric_name[8]; 137 uint8_t fabric_name[8];
135 uint8_t switch_name[8]; 138 uint8_t switch_name[8];
136 uint8_t mac_addr[6]; 139 uint8_t mac_addr[6];
137 uint16_t fcf_indx; 140 uint16_t fcf_indx;
141 uint32_t priority;
142 uint16_t vlan_id;
143 uint32_t addr_mode;
144 uint32_t flag;
145#define BOOT_ENABLE 0x01
146#define RECORD_VALID 0x02
147};
148
149struct lpfc_fcf {
138 uint16_t fcfi; 150 uint16_t fcfi;
139 uint32_t fcf_flag; 151 uint32_t fcf_flag;
140#define FCF_AVAILABLE 0x01 /* FCF available for discovery */ 152#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
141#define FCF_REGISTERED 0x02 /* FCF registered with FW */ 153#define FCF_REGISTERED 0x02 /* FCF registered with FW */
142#define FCF_DISCOVERED 0x04 /* FCF discovery started */ 154#define FCF_SCAN_DONE 0x04 /* FCF table scan done */
143#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */ 155#define FCF_IN_USE 0x08 /* At least one discovery completed */
144#define FCF_IN_USE 0x10 /* At least one discovery completed */ 156#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */
145#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */ 157#define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */
146 uint32_t priority; 158#define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */
159#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
160#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
161#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
162#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
147 uint32_t addr_mode; 163 uint32_t addr_mode;
148 uint16_t vlan_id; 164 uint16_t fcf_rr_init_indx;
165 struct lpfc_fcf_rec current_rec;
166 struct lpfc_fcf_rec failover_rec;
167 struct timer_list redisc_wait;
168 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
149}; 169};
150 170
171/*
172 * Maximum FCF table index; it is for the driver's internal bookkeeping and
173 * just needs to be no smaller than the supported HBA's FCF table size.
174 */
175#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
176
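
LPFC_SLI4_FCF_TBL_INDX_MAX dimensions the round-robin bmask. A sketch of how a bmask of that size would be allocated, on the assumption that the driver sizes it with BITS_TO_LONGS during SLI4 setup (the allocation itself is not part of this hunk):

	phba->fcf.fcf_rr_bmask = kzalloc(BITS_TO_LONGS(LPFC_SLI4_FCF_TBL_INDX_MAX) *
					 sizeof(unsigned long), GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask)
		return -ENOMEM;
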
151#define LPFC_REGION23_SIGNATURE "RG23" 177#define LPFC_REGION23_SIGNATURE "RG23"
152#define LPFC_REGION23_VERSION 1 178#define LPFC_REGION23_VERSION 1
153#define LPFC_REGION23_LAST_REC 0xff 179#define LPFC_REGION23_LAST_REC 0xff
@@ -166,7 +192,7 @@ struct lpfc_fip_param_hdr {
166#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 192#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
167#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 193#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
168#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags 194#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
169#define FIPP_MODE_ON 0x2 195#define FIPP_MODE_ON 0x1
170#define FIPP_MODE_OFF 0x0 196#define FIPP_MODE_OFF 0x0
171#define FIPP_VLAN_VALID 0x1 197#define FIPP_VLAN_VALID 0x1
172}; 198};
@@ -250,7 +276,10 @@ struct lpfc_bmbx {
250#define SLI4_CT_VFI 2 276#define SLI4_CT_VFI 2
251#define SLI4_CT_FCFI 3 277#define SLI4_CT_FCFI 3
252 278
253#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000 279#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
280#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0x2000
281#define LPFC_SLI4_MIN_BUF_SIZE 0x400
282#define LPFC_SLI4_MAX_BUF_SIZE 0x20000
254 283
255/* 284/*
256 * SLI4 specific data structures 285 * SLI4 specific data structures
@@ -284,6 +313,42 @@ struct lpfc_fcp_eq_hdl {
284 struct lpfc_hba *phba; 313 struct lpfc_hba *phba;
285}; 314};
286 315
316/* Port Capabilities for SLI4 Parameters */
317struct lpfc_pc_sli4_params {
318 uint32_t supported;
319 uint32_t if_type;
320 uint32_t sli_rev;
321 uint32_t sli_family;
322 uint32_t featurelevel_1;
323 uint32_t featurelevel_2;
324 uint32_t proto_types;
325#define LPFC_SLI4_PROTO_FCOE 0x0000001
326#define LPFC_SLI4_PROTO_FC 0x0000002
327#define LPFC_SLI4_PROTO_NIC 0x0000004
328#define LPFC_SLI4_PROTO_ISCSI 0x0000008
329#define LPFC_SLI4_PROTO_RDMA 0x0000010
330 uint32_t sge_supp_len;
331 uint32_t if_page_sz;
332 uint32_t rq_db_window;
333 uint32_t loopbk_scope;
334 uint32_t eq_pages_max;
335 uint32_t eqe_size;
336 uint32_t cq_pages_max;
337 uint32_t cqe_size;
338 uint32_t mq_pages_max;
339 uint32_t mqe_size;
340 uint32_t mq_elem_cnt;
341 uint32_t wq_pages_max;
342 uint32_t wqe_size;
343 uint32_t rq_pages_max;
344 uint32_t rqe_size;
345 uint32_t hdr_pages_max;
346 uint32_t hdr_size;
347 uint32_t hdr_pp_align;
348 uint32_t sgl_pages_max;
349 uint32_t sgl_pp_align;
350};
351
287/* SLI4 HBA data structure entries */ 352/* SLI4 HBA data structure entries */
288struct lpfc_sli4_hba { 353struct lpfc_sli4_hba {
289 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 354 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -295,10 +360,9 @@ struct lpfc_sli4_hba {
295 /* BAR0 PCI config space register memory map */ 360 /* BAR0 PCI config space register memory map */
296 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ 361 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
297 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ 362 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
298 void __iomem *ONLINE0regaddr; /* Address to components of internal UE */ 363 void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */
299 void __iomem *ONLINE1regaddr; /* Address to components of internal UE */ 364 void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */
300#define LPFC_ONLINE_NERR 0xFFFFFFFF 365 void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */
301 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
302 /* BAR1 FCoE function CSR register memory map */ 366 /* BAR1 FCoE function CSR register memory map */
303 void __iomem *STAregaddr; /* Address to HST_STATE register */ 367 void __iomem *STAregaddr; /* Address to HST_STATE register */
304 void __iomem *ISRregaddr; /* Address to HST_ISR register */ 368 void __iomem *ISRregaddr; /* Address to HST_ISR register */
@@ -311,6 +375,10 @@ struct lpfc_sli4_hba {
311 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ 375 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
312 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ 376 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
313 377
378 uint32_t ue_mask_lo;
379 uint32_t ue_mask_hi;
380 struct lpfc_register sli_intf;
381 struct lpfc_pc_sli4_params pc_sli4_params;
314 struct msix_entry *msix_entries; 382 struct msix_entry *msix_entries;
315 uint32_t cfg_eqn; 383 uint32_t cfg_eqn;
316 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 384 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
@@ -325,7 +393,6 @@ struct lpfc_sli4_hba {
325 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ 393 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
326 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ 394 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
327 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ 395 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
328 struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
329 396
330 /* Setup information for various queue parameters */ 397 /* Setup information for various queue parameters */
331 int eq_esize; 398 int eq_esize;
@@ -360,7 +427,7 @@ struct lpfc_sli4_hba {
360 unsigned long *rpi_bmask; 427 unsigned long *rpi_bmask;
361 uint16_t rpi_count; 428 uint16_t rpi_count;
362 struct lpfc_sli4_flags sli4_flags; 429 struct lpfc_sli4_flags sli4_flags;
363 struct list_head sp_rspiocb_work_queue; 430 struct list_head sp_queue_event;
364 struct list_head sp_cqe_event_pool; 431 struct list_head sp_cqe_event_pool;
365 struct list_head sp_asynce_work_queue; 432 struct list_head sp_asynce_work_queue;
366 struct list_head sp_fcp_xri_aborted_work_queue; 433 struct list_head sp_fcp_xri_aborted_work_queue;
@@ -376,11 +443,18 @@ enum lpfc_sge_type {
376 SCSI_BUFF_TYPE 443 SCSI_BUFF_TYPE
377}; 444};
378 445
446enum lpfc_sgl_state {
447 SGL_FREED,
448 SGL_ALLOCATED,
449 SGL_XRI_ABORTED
450};
451
379struct lpfc_sglq { 452struct lpfc_sglq {
380 /* lpfc_sglqs are used in double linked lists */ 453 /* lpfc_sglqs are used in double linked lists */
381 struct list_head list; 454 struct list_head list;
382 struct list_head clist; 455 struct list_head clist;
383 enum lpfc_sge_type buff_type; /* is this a scsi sgl */ 456 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
457 enum lpfc_sgl_state state;
384 uint16_t iotag; /* pre-assigned IO tag */ 458 uint16_t iotag; /* pre-assigned IO tag */
385 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 459 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
386 struct sli4_sge *sgl; /* pre-assigned SGL */ 460 struct sli4_sge *sgl; /* pre-assigned SGL */
@@ -408,6 +482,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
408void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); 482void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
409void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, 483void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
410 struct lpfc_mbx_sge *); 484 struct lpfc_mbx_sge *);
485int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
486 uint16_t);
411 487
412void lpfc_sli4_hba_reset(struct lpfc_hba *); 488void lpfc_sli4_hba_reset(struct lpfc_hba *);
413struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, 489struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -450,6 +526,7 @@ int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
450void lpfc_sli4_free_rpi(struct lpfc_hba *, int); 526void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
451void lpfc_sli4_remove_rpis(struct lpfc_hba *); 527void lpfc_sli4_remove_rpis(struct lpfc_hba *);
452void lpfc_sli4_async_event_proc(struct lpfc_hba *); 528void lpfc_sli4_async_event_proc(struct lpfc_hba *);
529void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
453int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); 530int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
454void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); 531void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
455void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); 532void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
@@ -465,8 +542,13 @@ int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
465uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); 542uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
466uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); 543uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
467void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); 544void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
468int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); 545int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
469void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); 546int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
547int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
548void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
549void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
550void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
551int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
470int lpfc_sli4_post_status_check(struct lpfc_hba *); 552int lpfc_sli4_post_status_check(struct lpfc_hba *);
471uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); 553uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
472 554
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9ae20af4bdb7..013deec5dae8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,8 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.4" 21#define LPFC_DRIVER_VERSION "8.3.10"
22
23#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
25#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 606efa767548..ffd575c379f3 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -26,6 +26,7 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/slab.h>
29#include <linux/spinlock.h> 30#include <linux/spinlock.h>
30 31
31#include <scsi/scsi.h> 32#include <scsi/scsi.h>
@@ -123,7 +124,12 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
123 } 124 }
124 mb = &pmb->u.mb; 125 mb = &pmb->u.mb;
125 126
126 lpfc_read_sparam(phba, pmb, vport->vpi); 127 rc = lpfc_read_sparam(phba, pmb, vport->vpi);
128 if (rc) {
129 mempool_free(pmb, phba->mbox_mem_pool);
130 return -ENOMEM;
131 }
132
127 /* 133 /*
128 * Grab buffer pointer and clear context1 so we can use 134 * Grab buffer pointer and clear context1 so we can use
129 * lpfc_sli_issue_box_wait 135 * lpfc_sli_issue_box_wait
@@ -389,7 +395,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
389 * by the port. 395 * by the port.
390 */ 396 */
391 if ((phba->sli_rev == LPFC_SLI_REV4) && 397 if ((phba->sli_rev == LPFC_SLI_REV4) &&
392 (pport->vfi_state & LPFC_VFI_REGISTERED)) { 398 (pport->fc_flag & FC_VFI_REGISTERED)) {
393 rc = lpfc_sli4_init_vpi(phba, vpi); 399 rc = lpfc_sli4_init_vpi(phba, vpi);
394 if (rc) { 400 if (rc) {
395 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 401 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -505,6 +511,7 @@ enable_vport(struct fc_vport *fc_vport)
505 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; 511 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
506 struct lpfc_hba *phba = vport->phba; 512 struct lpfc_hba *phba = vport->phba;
507 struct lpfc_nodelist *ndlp = NULL; 513 struct lpfc_nodelist *ndlp = NULL;
514 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
508 515
509 if ((phba->link_state < LPFC_LINK_UP) || 516 if ((phba->link_state < LPFC_LINK_UP) ||
510 (phba->fc_topology == TOPOLOGY_LOOP)) { 517 (phba->fc_topology == TOPOLOGY_LOOP)) {
@@ -512,8 +519,10 @@ enable_vport(struct fc_vport *fc_vport)
512 return VPORT_OK; 519 return VPORT_OK;
513 } 520 }
514 521
522 spin_lock_irq(shost->host_lock);
515 vport->load_flag |= FC_LOADING; 523 vport->load_flag |= FC_LOADING;
516 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 524 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
525 spin_unlock_irq(shost->host_lock);
517 526
518 /* Use the Physical nodes Fabric NDLP to determine if the link is 527 /* Use the Physical nodes Fabric NDLP to determine if the link is
519 * up and ready to FDISC. 528 * up and ready to FDISC.
@@ -700,6 +709,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
700 } 709 }
701 spin_unlock_irq(&phba->ndlp_lock); 710 spin_unlock_irq(&phba->ndlp_lock);
702 } 711 }
712 if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
713 goto skip_logo;
703 vport->unreg_vpi_cmpl = VPORT_INVAL; 714 vport->unreg_vpi_cmpl = VPORT_INVAL;
704 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 715 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
705 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) 716 if (!lpfc_issue_els_npiv_logo(vport, ndlp))