aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/lpfc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--drivers/scsi/lpfc/lpfc.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c120
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c36
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c112
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c220
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c279
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c31
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c86
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c345
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
17 files changed, 1084 insertions, 229 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e35a4c71eb9a..3482d5a5aed2 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -20,7 +20,6 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <scsi/scsi_host.h> 22#include <scsi/scsi_host.h>
23
24struct lpfc_sli2_slim; 23struct lpfc_sli2_slim;
25 24
26#define LPFC_PCI_DEV_LP 0x1 25#define LPFC_PCI_DEV_LP 0x1
@@ -49,7 +48,7 @@ struct lpfc_sli2_slim;
49#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt 48#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt
50 queue depth change in millisecs */ 49 queue depth change in millisecs */
51#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */ 50#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */
52#define LPFC_MIN_TGT_QDEPTH 100 51#define LPFC_MIN_TGT_QDEPTH 10
53#define LPFC_MAX_TGT_QDEPTH 0xFFFF 52#define LPFC_MAX_TGT_QDEPTH 0xFFFF
54 53
55#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data 54#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
@@ -376,6 +375,7 @@ struct lpfc_vport {
376#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timeout */ 375#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timeout */
377#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */ 376#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
378#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */ 377#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
378#define WORKER_SERVICE_TXQ 0x2000 /* hba: IOCBs on the txq */
379 379
380 struct timer_list fc_fdmitmo; 380 struct timer_list fc_fdmitmo;
381 struct timer_list els_tmofunc; 381 struct timer_list els_tmofunc;
@@ -400,6 +400,7 @@ struct lpfc_vport {
400 uint32_t cfg_max_luns; 400 uint32_t cfg_max_luns;
401 uint32_t cfg_enable_da_id; 401 uint32_t cfg_enable_da_id;
402 uint32_t cfg_max_scsicmpl_time; 402 uint32_t cfg_max_scsicmpl_time;
403 uint32_t cfg_tgt_queue_depth;
403 404
404 uint32_t dev_loss_tmo_changed; 405 uint32_t dev_loss_tmo_changed;
405 406
@@ -510,9 +511,9 @@ struct lpfc_hba {
510 void (*lpfc_stop_port) 511 void (*lpfc_stop_port)
511 (struct lpfc_hba *); 512 (struct lpfc_hba *);
512 int (*lpfc_hba_init_link) 513 int (*lpfc_hba_init_link)
513 (struct lpfc_hba *); 514 (struct lpfc_hba *, uint32_t);
514 int (*lpfc_hba_down_link) 515 int (*lpfc_hba_down_link)
515 (struct lpfc_hba *); 516 (struct lpfc_hba *, uint32_t);
516 517
517 /* SLI4 specific HBA data structure */ 518 /* SLI4 specific HBA data structure */
518 struct lpfc_sli4_hba sli4_hba; 519 struct lpfc_sli4_hba sli4_hba;
@@ -525,7 +526,6 @@ struct lpfc_hba {
525#define LPFC_SLI3_NPIV_ENABLED 0x02 526#define LPFC_SLI3_NPIV_ENABLED 0x02
526#define LPFC_SLI3_VPORT_TEARDOWN 0x04 527#define LPFC_SLI3_VPORT_TEARDOWN 0x04
527#define LPFC_SLI3_CRP_ENABLED 0x08 528#define LPFC_SLI3_CRP_ENABLED 0x08
528#define LPFC_SLI3_INB_ENABLED 0x10
529#define LPFC_SLI3_BG_ENABLED 0x20 529#define LPFC_SLI3_BG_ENABLED 0x20
530#define LPFC_SLI3_DSS_ENABLED 0x40 530#define LPFC_SLI3_DSS_ENABLED 0x40
531 uint32_t iocb_cmd_size; 531 uint32_t iocb_cmd_size;
@@ -557,9 +557,6 @@ struct lpfc_hba {
557 557
558 MAILBOX_t *mbox; 558 MAILBOX_t *mbox;
559 uint32_t *mbox_ext; 559 uint32_t *mbox_ext;
560 uint32_t *inb_ha_copy;
561 uint32_t *inb_counter;
562 uint32_t inb_last_counter;
563 uint32_t ha_copy; 560 uint32_t ha_copy;
564 struct _PCB *pcb; 561 struct _PCB *pcb;
565 struct _IOCB *IOCBs; 562 struct _IOCB *IOCBs;
@@ -628,6 +625,7 @@ struct lpfc_hba {
628 uint32_t cfg_hostmem_hgp; 625 uint32_t cfg_hostmem_hgp;
629 uint32_t cfg_log_verbose; 626 uint32_t cfg_log_verbose;
630 uint32_t cfg_aer_support; 627 uint32_t cfg_aer_support;
628 uint32_t cfg_iocb_cnt;
631 uint32_t cfg_suppress_link_up; 629 uint32_t cfg_suppress_link_up;
632#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ 630#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
633#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ 631#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
@@ -816,6 +814,9 @@ struct lpfc_hba {
816 814
817 uint8_t menlo_flag; /* menlo generic flags */ 815 uint8_t menlo_flag; /* menlo generic flags */
818#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */ 816#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
817 uint32_t iocb_cnt;
818 uint32_t iocb_max;
819 atomic_t sdev_cnt;
819}; 820};
820 821
821static inline struct Scsi_Host * 822static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index bf33b315f93e..ad05b266e950 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -25,6 +25,7 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/aer.h> 26#include <linux/aer.h>
27#include <linux/gfp.h> 27#include <linux/gfp.h>
28#include <linux/kernel.h>
28 29
29#include <scsi/scsi.h> 30#include <scsi/scsi.h>
30#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
@@ -506,10 +507,10 @@ lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
506 507
507 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) && 508 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
508 (phba->link_state == LPFC_LINK_DOWN)) 509 (phba->link_state == LPFC_LINK_DOWN))
509 status = phba->lpfc_hba_init_link(phba); 510 status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
510 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) && 511 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
511 (phba->link_state >= LPFC_LINK_UP)) 512 (phba->link_state >= LPFC_LINK_UP))
512 status = phba->lpfc_hba_down_link(phba); 513 status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
513 514
514 if (status == 0) 515 if (status == 0)
515 return strlen(buf); 516 return strlen(buf);
@@ -864,7 +865,6 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
864 uint32_t *mrpi, uint32_t *arpi, 865 uint32_t *mrpi, uint32_t *arpi,
865 uint32_t *mvpi, uint32_t *avpi) 866 uint32_t *mvpi, uint32_t *avpi)
866{ 867{
867 struct lpfc_sli *psli = &phba->sli;
868 struct lpfc_mbx_read_config *rd_config; 868 struct lpfc_mbx_read_config *rd_config;
869 LPFC_MBOXQ_t *pmboxq; 869 LPFC_MBOXQ_t *pmboxq;
870 MAILBOX_t *pmb; 870 MAILBOX_t *pmb;
@@ -893,8 +893,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
893 pmb->mbxOwner = OWN_HOST; 893 pmb->mbxOwner = OWN_HOST;
894 pmboxq->context1 = NULL; 894 pmboxq->context1 = NULL;
895 895
896 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || 896 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
897 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
898 rc = MBX_NOT_FINISHED; 897 rc = MBX_NOT_FINISHED;
899 else 898 else
900 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 899 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -1797,12 +1796,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
1797 1796
1798 /* Validate and store the new name */ 1797 /* Validate and store the new name */
1799 for (i=0, j=0; i < 16; i++) { 1798 for (i=0, j=0; i < 16; i++) {
1800 if ((*buf >= 'a') && (*buf <= 'f')) 1799 int value;
1801 j = ((j << 4) | ((*buf++ -'a') + 10)); 1800
1802 else if ((*buf >= 'A') && (*buf <= 'F')) 1801 value = hex_to_bin(*buf++);
1803 j = ((j << 4) | ((*buf++ -'A') + 10)); 1802 if (value >= 0)
1804 else if ((*buf >= '0') && (*buf <= '9')) 1803 j = (j << 4) | value;
1805 j = ((j << 4) | (*buf++ -'0'));
1806 else 1804 else
1807 return -EINVAL; 1805 return -EINVAL;
1808 if (i % 2) { 1806 if (i % 2) {
@@ -1890,12 +1888,11 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
1890 1888
1891 /* Validate and store the new name */ 1889 /* Validate and store the new name */
1892 for (i=0, j=0; i < 16; i++) { 1890 for (i=0, j=0; i < 16; i++) {
1893 if ((*buf >= 'a') && (*buf <= 'f')) 1891 int value;
1894 j = ((j << 4) | ((*buf++ -'a') + 10)); 1892
1895 else if ((*buf >= 'A') && (*buf <= 'F')) 1893 value = hex_to_bin(*buf++);
1896 j = ((j << 4) | ((*buf++ -'A') + 10)); 1894 if (value >= 0)
1897 else if ((*buf >= '0') && (*buf <= '9')) 1895 j = (j << 4) | value;
1898 j = ((j << 4) | (*buf++ -'0'));
1899 else 1896 else
1900 return -EINVAL; 1897 return -EINVAL;
1901 if (i % 2) { 1898 if (i % 2) {
@@ -1949,6 +1946,59 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
1949LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, 1946LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
1950 LPFC_DELAY_INIT_LINK_INDEFINITELY, 1947 LPFC_DELAY_INIT_LINK_INDEFINITELY,
1951 "Suppress Link Up at initialization"); 1948 "Suppress Link Up at initialization");
1949/*
1950# lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
1951# 1 - (1024)
1952# 2 - (2048)
1953# 3 - (3072)
1954# 4 - (4096)
1955# 5 - (5120)
1956*/
1957static ssize_t
1958lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
1959{
1960 struct Scsi_Host *shost = class_to_shost(dev);
1961 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
1962
1963 return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
1964}
1965
1966static DEVICE_ATTR(iocb_hw, S_IRUGO,
1967 lpfc_iocb_hw_show, NULL);
1968static ssize_t
1969lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
1970{
1971 struct Scsi_Host *shost = class_to_shost(dev);
1972 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
1973
1974 return snprintf(buf, PAGE_SIZE, "%d\n",
1975 phba->sli.ring[LPFC_ELS_RING].txq_max);
1976}
1977
1978static DEVICE_ATTR(txq_hw, S_IRUGO,
1979 lpfc_txq_hw_show, NULL);
1980static ssize_t
1981lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
1982 char *buf)
1983{
1984 struct Scsi_Host *shost = class_to_shost(dev);
1985 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
1986
1987 return snprintf(buf, PAGE_SIZE, "%d\n",
1988 phba->sli.ring[LPFC_ELS_RING].txcmplq_max);
1989}
1990
1991static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
1992 lpfc_txcmplq_hw_show, NULL);
1993
1994int lpfc_iocb_cnt = 2;
1995module_param(lpfc_iocb_cnt, int, 1);
1996MODULE_PARM_DESC(lpfc_iocb_cnt,
1997 "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
1998lpfc_param_show(iocb_cnt);
1999lpfc_param_init(iocb_cnt, 2, 1, 5);
2000static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
2001 lpfc_iocb_cnt_show, NULL);
1952 2002
1953/* 2003/*
1954# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 2004# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -2157,6 +2207,13 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
2157 "Max number of FCP commands we can queue to a specific LUN"); 2207 "Max number of FCP commands we can queue to a specific LUN");
2158 2208
2159/* 2209/*
2210# tgt_queue_depth: This parameter is used to limit the number of outstanding
2211# commands per target port. Value range is [10,65535]. Default value is 65535.
2212*/
2213LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
2214 "Max number of FCP commands we can queue to a specific target port");
2215
2216/*
2160# hba_queue_depth: This parameter is used to limit the number of outstanding 2217# hba_queue_depth: This parameter is used to limit the number of outstanding
2161# commands per lpfc HBA. Value range is [32,8192]. If this parameter 2218# commands per lpfc HBA. Value range is [32,8192]. If this parameter
2162# value is greater than the maximum number of exchanges supported by the HBA, 2219# value is greater than the maximum number of exchanges supported by the HBA,
@@ -2720,7 +2777,6 @@ static struct bin_attribute sysfs_drvr_stat_data_attr = {
2720 .attr = { 2777 .attr = {
2721 .name = "lpfc_drvr_stat_data", 2778 .name = "lpfc_drvr_stat_data",
2722 .mode = S_IRUSR, 2779 .mode = S_IRUSR,
2723 .owner = THIS_MODULE,
2724 }, 2780 },
2725 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET, 2781 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
2726 .read = sysfs_drvr_stat_data_read, 2782 .read = sysfs_drvr_stat_data_read,
@@ -2890,9 +2946,6 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
2890 struct lpfc_hba *phba = vport->phba; 2946 struct lpfc_hba *phba = vport->phba;
2891 int val = 0, rc = -EINVAL; 2947 int val = 0, rc = -EINVAL;
2892 2948
2893 /* AER not supported on OC devices yet */
2894 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
2895 return -EPERM;
2896 if (!isdigit(buf[0])) 2949 if (!isdigit(buf[0]))
2897 return -EINVAL; 2950 return -EINVAL;
2898 if (sscanf(buf, "%i", &val) != 1) 2951 if (sscanf(buf, "%i", &val) != 1)
@@ -2965,12 +3018,6 @@ lpfc_param_show(aer_support)
2965static int 3018static int
2966lpfc_aer_support_init(struct lpfc_hba *phba, int val) 3019lpfc_aer_support_init(struct lpfc_hba *phba, int val)
2967{ 3020{
2968 /* AER not supported on OC devices yet */
2969 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
2970 phba->cfg_aer_support = 0;
2971 return -EPERM;
2972 }
2973
2974 if (val == 0 || val == 1) { 3021 if (val == 0 || val == 1) {
2975 phba->cfg_aer_support = val; 3022 phba->cfg_aer_support = val;
2976 return 0; 3023 return 0;
@@ -3015,9 +3062,6 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
3015 struct lpfc_hba *phba = vport->phba; 3062 struct lpfc_hba *phba = vport->phba;
3016 int val, rc = -1; 3063 int val, rc = -1;
3017 3064
3018 /* AER not supported on OC devices yet */
3019 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
3020 return -EPERM;
3021 if (!isdigit(buf[0])) 3065 if (!isdigit(buf[0]))
3022 return -EINVAL; 3066 return -EINVAL;
3023 if (sscanf(buf, "%i", &val) != 1) 3067 if (sscanf(buf, "%i", &val) != 1)
@@ -3083,7 +3127,7 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
3083 continue; 3127 continue;
3084 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3128 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3085 continue; 3129 continue;
3086 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; 3130 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3087 } 3131 }
3088 spin_unlock_irq(shost->host_lock); 3132 spin_unlock_irq(shost->host_lock);
3089 return 0; 3133 return 0;
@@ -3287,6 +3331,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3287 &dev_attr_lpfc_temp_sensor, 3331 &dev_attr_lpfc_temp_sensor,
3288 &dev_attr_lpfc_log_verbose, 3332 &dev_attr_lpfc_log_verbose,
3289 &dev_attr_lpfc_lun_queue_depth, 3333 &dev_attr_lpfc_lun_queue_depth,
3334 &dev_attr_lpfc_tgt_queue_depth,
3290 &dev_attr_lpfc_hba_queue_depth, 3335 &dev_attr_lpfc_hba_queue_depth,
3291 &dev_attr_lpfc_peer_port_login, 3336 &dev_attr_lpfc_peer_port_login,
3292 &dev_attr_lpfc_nodev_tmo, 3337 &dev_attr_lpfc_nodev_tmo,
@@ -3334,6 +3379,10 @@ struct device_attribute *lpfc_hba_attrs[] = {
3334 &dev_attr_lpfc_aer_support, 3379 &dev_attr_lpfc_aer_support,
3335 &dev_attr_lpfc_aer_state_cleanup, 3380 &dev_attr_lpfc_aer_state_cleanup,
3336 &dev_attr_lpfc_suppress_link_up, 3381 &dev_attr_lpfc_suppress_link_up,
3382 &dev_attr_lpfc_iocb_cnt,
3383 &dev_attr_iocb_hw,
3384 &dev_attr_txq_hw,
3385 &dev_attr_txcmplq_hw,
3337 NULL, 3386 NULL,
3338}; 3387};
3339 3388
@@ -3344,6 +3393,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
3344 &dev_attr_lpfc_drvr_version, 3393 &dev_attr_lpfc_drvr_version,
3345 &dev_attr_lpfc_log_verbose, 3394 &dev_attr_lpfc_log_verbose,
3346 &dev_attr_lpfc_lun_queue_depth, 3395 &dev_attr_lpfc_lun_queue_depth,
3396 &dev_attr_lpfc_tgt_queue_depth,
3347 &dev_attr_lpfc_nodev_tmo, 3397 &dev_attr_lpfc_nodev_tmo,
3348 &dev_attr_lpfc_devloss_tmo, 3398 &dev_attr_lpfc_devloss_tmo,
3349 &dev_attr_lpfc_hba_queue_depth, 3399 &dev_attr_lpfc_hba_queue_depth,
@@ -4042,8 +4092,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
4042 pmboxq->context1 = NULL; 4092 pmboxq->context1 = NULL;
4043 pmboxq->vport = vport; 4093 pmboxq->vport = vport;
4044 4094
4045 if ((vport->fc_flag & FC_OFFLINE_MODE) || 4095 if (vport->fc_flag & FC_OFFLINE_MODE)
4046 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
4047 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 4096 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4048 else 4097 else
4049 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 4098 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -4067,8 +4116,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
4067 pmboxq->context1 = NULL; 4116 pmboxq->context1 = NULL;
4068 pmboxq->vport = vport; 4117 pmboxq->vport = vport;
4069 4118
4070 if ((vport->fc_flag & FC_OFFLINE_MODE) || 4119 if (vport->fc_flag & FC_OFFLINE_MODE)
4071 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
4072 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 4120 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4073 else 4121 else
4074 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 4122 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -4521,6 +4569,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4521 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); 4569 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4522 lpfc_aer_support_init(phba, lpfc_aer_support); 4570 lpfc_aer_support_init(phba, lpfc_aer_support);
4523 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); 4571 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
4572 lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
4524 return; 4573 return;
4525} 4574}
4526 4575
@@ -4533,6 +4582,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
4533{ 4582{
4534 lpfc_log_verbose_init(vport, lpfc_log_verbose); 4583 lpfc_log_verbose_init(vport, lpfc_log_verbose);
4535 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth); 4584 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
4585 lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
4536 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo); 4586 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
4537 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo); 4587 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
4538 lpfc_peer_port_login_init(vport, lpfc_peer_port_login); 4588 lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index dcf088262b20..d521569e6620 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -377,6 +377,11 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
377 377
378 if (rc == IOCB_SUCCESS) 378 if (rc == IOCB_SUCCESS)
379 return 0; /* done for now */ 379 return 0; /* done for now */
380 else if (rc == IOCB_BUSY)
381 rc = EAGAIN;
382 else
383 rc = EIO;
384
380 385
381 /* iocb failed so cleanup */ 386 /* iocb failed so cleanup */
382 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 387 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
@@ -625,6 +630,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
625 lpfc_nlp_put(ndlp); 630 lpfc_nlp_put(ndlp);
626 if (rc == IOCB_SUCCESS) 631 if (rc == IOCB_SUCCESS)
627 return 0; /* done for now */ 632 return 0; /* done for now */
633 else if (rc == IOCB_BUSY)
634 rc = EAGAIN;
635 else
636 rc = EIO;
628 637
629 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 638 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
630 job->request_payload.sg_cnt, DMA_TO_DEVICE); 639 job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -953,10 +962,22 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
953 if (phba->sli_rev == LPFC_SLI_REV4) { 962 if (phba->sli_rev == LPFC_SLI_REV4) {
954 evt_dat->immed_dat = phba->ctx_idx; 963 evt_dat->immed_dat = phba->ctx_idx;
955 phba->ctx_idx = (phba->ctx_idx + 1) % 64; 964 phba->ctx_idx = (phba->ctx_idx + 1) % 64;
965 /* Provide warning for over-run of the ct_ctx array */
966 if (phba->ct_ctx[evt_dat->immed_dat].flags &
967 UNSOL_VALID)
968 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
969 "2717 CT context array entry "
970 "[%d] over-run: oxid:x%x, "
971 "sid:x%x\n", phba->ctx_idx,
972 phba->ct_ctx[
973 evt_dat->immed_dat].oxid,
974 phba->ct_ctx[
975 evt_dat->immed_dat].SID);
956 phba->ct_ctx[evt_dat->immed_dat].oxid = 976 phba->ct_ctx[evt_dat->immed_dat].oxid =
957 piocbq->iocb.ulpContext; 977 piocbq->iocb.ulpContext;
958 phba->ct_ctx[evt_dat->immed_dat].SID = 978 phba->ct_ctx[evt_dat->immed_dat].SID =
959 piocbq->iocb.un.rcvels.remoteID; 979 piocbq->iocb.un.rcvels.remoteID;
980 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
960 } else 981 } else
961 evt_dat->immed_dat = piocbq->iocb.ulpContext; 982 evt_dat->immed_dat = piocbq->iocb.ulpContext;
962 983
@@ -1314,6 +1335,21 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1314 rc = IOCB_ERROR; 1335 rc = IOCB_ERROR;
1315 goto issue_ct_rsp_exit; 1336 goto issue_ct_rsp_exit;
1316 } 1337 }
1338
1339 /* Check if the ndlp is active */
1340 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1341 rc = -IOCB_ERROR;
1342 goto issue_ct_rsp_exit;
1343 }
1344
1345 /* get a refernece count so the ndlp doesn't go away while
1346 * we respond
1347 */
1348 if (!lpfc_nlp_get(ndlp)) {
1349 rc = -IOCB_ERROR;
1350 goto issue_ct_rsp_exit;
1351 }
1352
1317 icmd->un.ulpWord[3] = ndlp->nlp_rpi; 1353 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1318 /* The exchange is done, mark the entry as invalid */ 1354 /* The exchange is done, mark the entry as invalid */
1319 phba->ct_ctx[tag].flags &= ~UNSOL_VALID; 1355 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fbc9baeb6048..03f4ddc18572 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -41,6 +41,7 @@ void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
41void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 41void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
42int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, 42int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
43 LPFC_MBOXQ_t *, uint32_t); 43 LPFC_MBOXQ_t *, uint32_t);
44void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
44void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
45void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 46void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
46void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); 47void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
@@ -190,6 +191,7 @@ irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
190irqreturn_t lpfc_sli4_fp_intr_handler(int, void *); 191irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
191 192
192void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 193void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
194void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
193void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); 195void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
194void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); 196void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
195void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 197void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -402,3 +404,12 @@ int lpfc_bsg_request(struct fc_bsg_job *);
402int lpfc_bsg_timeout(struct fc_bsg_job *); 404int lpfc_bsg_timeout(struct fc_bsg_job *);
403int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 405int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
404 struct lpfc_iocbq *); 406 struct lpfc_iocbq *);
407void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
408 struct lpfc_iocbq *);
409struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
410 struct lpfc_sli_ring *);
411int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
412 struct lpfc_iocbq *, uint32_t);
413uint32_t lpfc_drain_txq(struct lpfc_hba *);
414
415
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 36257a685509..7cae69de36f7 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -114,6 +114,8 @@ struct lpfc_nodelist {
114}; 114};
115 115
116/* Defines for nlp_flag (uint32) */ 116/* Defines for nlp_flag (uint32) */
117#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
118#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
117#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ 119#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
118#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ 120#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
119#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ 121#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c4c7f0ad7468..afbed6bc31f0 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -796,7 +796,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
796 * due to new FCF discovery 796 * due to new FCF discovery
797 */ 797 */
798 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 798 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
799 (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 799 (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
800 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
801 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
800 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 802 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
801 "2611 FLOGI failed on registered " 803 "2611 FLOGI failed on registered "
802 "FCF record fcf_index:%d, trying " 804 "FCF record fcf_index:%d, trying "
@@ -811,18 +813,21 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
811 */ 813 */
812 lpfc_printf_log(phba, KERN_WARNING, 814 lpfc_printf_log(phba, KERN_WARNING,
813 LOG_FIP | LOG_ELS, 815 LOG_FIP | LOG_ELS,
814 "2760 FLOGI exhausted FCF " 816 "2760 Completed one round "
815 "round robin failover list, " 817 "of FLOGI FCF round robin "
816 "retry FLOGI on the current " 818 "failover list, retry FLOGI "
817 "registered FCF index:%d\n", 819 "on currently registered "
820 "FCF index:%d\n",
818 phba->fcf.current_rec.fcf_indx); 821 phba->fcf.current_rec.fcf_indx);
819 spin_lock_irq(&phba->hbalock);
820 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
821 spin_unlock_irq(&phba->hbalock);
822 } else { 822 } else {
823 lpfc_printf_log(phba, KERN_INFO,
824 LOG_FIP | LOG_ELS,
825 "2794 FLOGI FCF round robin "
826 "failover to FCF index x%x\n",
827 fcf_index);
823 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, 828 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
824 fcf_index); 829 fcf_index);
825 if (rc) { 830 if (rc)
826 lpfc_printf_log(phba, KERN_WARNING, 831 lpfc_printf_log(phba, KERN_WARNING,
827 LOG_FIP | LOG_ELS, 832 LOG_FIP | LOG_ELS,
828 "2761 FLOGI round " 833 "2761 FLOGI round "
@@ -831,10 +836,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
831 "rc:x%x, fcf_index:" 836 "rc:x%x, fcf_index:"
832 "%d\n", rc, 837 "%d\n", rc,
833 phba->fcf.current_rec.fcf_indx); 838 phba->fcf.current_rec.fcf_indx);
834 spin_lock_irq(&phba->hbalock); 839 else
835 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
836 spin_unlock_irq(&phba->hbalock);
837 } else
838 goto out; 840 goto out;
839 } 841 }
840 } 842 }
@@ -890,9 +892,39 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
890 */ 892 */
891 if (sp->cmn.fPort) 893 if (sp->cmn.fPort)
892 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); 894 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
893 else 895 else if (!(phba->hba_flag & HBA_FCOE_SUPPORT))
894 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 896 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
895 897 else {
898 lpfc_printf_vlog(vport, KERN_ERR,
899 LOG_FIP | LOG_ELS,
900 "2831 FLOGI response with cleared Fabric "
901 "bit fcf_index 0x%x "
902 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
903 "Fabric Name "
904 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
905 phba->fcf.current_rec.fcf_indx,
906 phba->fcf.current_rec.switch_name[0],
907 phba->fcf.current_rec.switch_name[1],
908 phba->fcf.current_rec.switch_name[2],
909 phba->fcf.current_rec.switch_name[3],
910 phba->fcf.current_rec.switch_name[4],
911 phba->fcf.current_rec.switch_name[5],
912 phba->fcf.current_rec.switch_name[6],
913 phba->fcf.current_rec.switch_name[7],
914 phba->fcf.current_rec.fabric_name[0],
915 phba->fcf.current_rec.fabric_name[1],
916 phba->fcf.current_rec.fabric_name[2],
917 phba->fcf.current_rec.fabric_name[3],
918 phba->fcf.current_rec.fabric_name[4],
919 phba->fcf.current_rec.fabric_name[5],
920 phba->fcf.current_rec.fabric_name[6],
921 phba->fcf.current_rec.fabric_name[7]);
922 lpfc_nlp_put(ndlp);
923 spin_lock_irq(&phba->hbalock);
924 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
925 spin_unlock_irq(&phba->hbalock);
926 goto out;
927 }
896 if (!rc) { 928 if (!rc) {
897 /* Mark the FCF discovery process done */ 929 /* Mark the FCF discovery process done */
898 if (phba->hba_flag & HBA_FIP_SUPPORT) 930 if (phba->hba_flag & HBA_FIP_SUPPORT)
@@ -1472,8 +1504,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1472 } 1504 }
1473 goto out; 1505 goto out;
1474 } 1506 }
1475 /* PLOGI failed */ 1507 /* PLOGI failed Don't print the vport to vport rjts */
1476 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1508 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1509 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1510 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1511 (phba)->pport->cfg_log_verbose & LOG_ELS)
1512 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1477 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1513 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1478 ndlp->nlp_DID, irsp->ulpStatus, 1514 ndlp->nlp_DID, irsp->ulpStatus,
1479 irsp->un.ulpWord[4]); 1515 irsp->un.ulpWord[4]);
@@ -2740,6 +2776,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2740 retry = 1; 2776 retry = 1;
2741 break; 2777 break;
2742 } 2778 }
2779 if (stat.un.b.lsRjtRsnCodeExp ==
2780 LSEXP_CANT_GIVE_DATA) {
2781 if (cmd == ELS_CMD_PLOGI) {
2782 delay = 1000;
2783 maxretry = 48;
2784 }
2785 retry = 1;
2786 break;
2787 }
2743 if (cmd == ELS_CMD_PLOGI) { 2788 if (cmd == ELS_CMD_PLOGI) {
2744 delay = 1000; 2789 delay = 1000;
2745 maxretry = lpfc_max_els_tries + 1; 2790 maxretry = lpfc_max_els_tries + 1;
@@ -5135,6 +5180,7 @@ lpfc_els_timeout(unsigned long ptr)
5135 return; 5180 return;
5136} 5181}
5137 5182
5183
5138/** 5184/**
5139 * lpfc_els_timeout_handler - Process an els timeout event 5185 * lpfc_els_timeout_handler - Process an els timeout event
5140 * @vport: pointer to a virtual N_Port data structure. 5186 * @vport: pointer to a virtual N_Port data structure.
@@ -5155,13 +5201,19 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
5155 uint32_t els_command = 0; 5201 uint32_t els_command = 0;
5156 uint32_t timeout; 5202 uint32_t timeout;
5157 uint32_t remote_ID = 0xffffffff; 5203 uint32_t remote_ID = 0xffffffff;
5204 LIST_HEAD(txcmplq_completions);
5205 LIST_HEAD(abort_list);
5206
5158 5207
5159 spin_lock_irq(&phba->hbalock);
5160 timeout = (uint32_t)(phba->fc_ratov << 1); 5208 timeout = (uint32_t)(phba->fc_ratov << 1);
5161 5209
5162 pring = &phba->sli.ring[LPFC_ELS_RING]; 5210 pring = &phba->sli.ring[LPFC_ELS_RING];
5163 5211
5164 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 5212 spin_lock_irq(&phba->hbalock);
5213 list_splice_init(&pring->txcmplq, &txcmplq_completions);
5214 spin_unlock_irq(&phba->hbalock);
5215
5216 list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
5165 cmd = &piocb->iocb; 5217 cmd = &piocb->iocb;
5166 5218
5167 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 5219 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
@@ -5198,13 +5250,22 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
5198 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 5250 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5199 remote_ID = ndlp->nlp_DID; 5251 remote_ID = ndlp->nlp_DID;
5200 } 5252 }
5253 list_add_tail(&piocb->dlist, &abort_list);
5254 }
5255 spin_lock_irq(&phba->hbalock);
5256 list_splice(&txcmplq_completions, &pring->txcmplq);
5257 spin_unlock_irq(&phba->hbalock);
5258
5259 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
5201 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 5260 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5202 "0127 ELS timeout Data: x%x x%x x%x " 5261 "0127 ELS timeout Data: x%x x%x x%x "
5203 "x%x\n", els_command, 5262 "x%x\n", els_command,
5204 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 5263 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
5264 spin_lock_irq(&phba->hbalock);
5265 list_del_init(&piocb->dlist);
5205 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 5266 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5267 spin_unlock_irq(&phba->hbalock);
5206 } 5268 }
5207 spin_unlock_irq(&phba->hbalock);
5208 5269
5209 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 5270 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
5210 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 5271 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
@@ -6901,6 +6962,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6901 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 6962 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6902 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6963 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6903 unsigned long iflag = 0; 6964 unsigned long iflag = 0;
6965 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6904 6966
6905 spin_lock_irqsave(&phba->hbalock, iflag); 6967 spin_lock_irqsave(&phba->hbalock, iflag);
6906 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 6968 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -6913,6 +6975,10 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6913 sglq_entry->state = SGL_FREED; 6975 sglq_entry->state = SGL_FREED;
6914 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 6976 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6915 spin_unlock_irqrestore(&phba->hbalock, iflag); 6977 spin_unlock_irqrestore(&phba->hbalock, iflag);
6978
6979 /* Check if TXQ queue needs to be serviced */
6980 if (pring->txq_cnt)
6981 lpfc_worker_wake_up(phba);
6916 return; 6982 return;
6917 } 6983 }
6918 } 6984 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 1f87b4fb8b50..0639c994349c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -275,7 +275,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
275 if (!(vport->load_flag & FC_UNLOADING) && 275 if (!(vport->load_flag & FC_UNLOADING) &&
276 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 276 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
277 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 277 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
278 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 278 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
279 (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
280 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
279 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 281 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
280 282
281 lpfc_unregister_unused_fcf(phba); 283 lpfc_unregister_unused_fcf(phba);
@@ -586,6 +588,8 @@ lpfc_work_done(struct lpfc_hba *phba)
586 (status & 588 (status &
587 HA_RXMASK)); 589 HA_RXMASK));
588 } 590 }
591 if (pring->txq_cnt)
592 lpfc_drain_txq(phba);
589 /* 593 /*
590 * Turn on Ring interrupts 594 * Turn on Ring interrupts
591 */ 595 */
@@ -1297,7 +1301,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1297 * used for this FCF when the function returns. 1301 * used for this FCF when the function returns.
1298 * If the FCF record need to be used with a particular vlan id, the vlan is 1302 * If the FCF record need to be used with a particular vlan id, the vlan is
1299 * set in the vlan_id on return of the function. If not VLAN tagging need to 1303 * set in the vlan_id on return of the function. If not VLAN tagging need to
1300 * be used with the FCF vlan_id will be set to 0xFFFF; 1304 * be used with the FCF vlan_id will be set to LPFC_FCOE_NULL_VID;
1301 **/ 1305 **/
1302static int 1306static int
1303lpfc_match_fcf_conn_list(struct lpfc_hba *phba, 1307lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
@@ -1333,7 +1337,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1333 if (phba->valid_vlan) 1337 if (phba->valid_vlan)
1334 *vlan_id = phba->vlan_id; 1338 *vlan_id = phba->vlan_id;
1335 else 1339 else
1336 *vlan_id = 0xFFFF; 1340 *vlan_id = LPFC_FCOE_NULL_VID;
1337 return 1; 1341 return 1;
1338 } 1342 }
1339 1343
@@ -1357,7 +1361,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1357 if (fcf_vlan_id) 1361 if (fcf_vlan_id)
1358 *vlan_id = fcf_vlan_id; 1362 *vlan_id = fcf_vlan_id;
1359 else 1363 else
1360 *vlan_id = 0xFFFF; 1364 *vlan_id = LPFC_FCOE_NULL_VID;
1361 return 1; 1365 return 1;
1362 } 1366 }
1363 1367
@@ -1466,7 +1470,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1466 else if (fcf_vlan_id) 1470 else if (fcf_vlan_id)
1467 *vlan_id = fcf_vlan_id; 1471 *vlan_id = fcf_vlan_id;
1468 else 1472 else
1469 *vlan_id = 0xFFFF; 1473 *vlan_id = LPFC_FCOE_NULL_VID;
1470 1474
1471 return 1; 1475 return 1;
1472 } 1476 }
@@ -1518,6 +1522,9 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1518 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS 1522 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
1519 * flag 1523 * flag
1520 */ 1524 */
1525 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1526 "2833 Stop FCF discovery process due to link "
1527 "state change (x%x)\n", phba->link_state);
1521 spin_lock_irq(&phba->hbalock); 1528 spin_lock_irq(&phba->hbalock);
1522 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1529 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1523 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 1530 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
@@ -1565,7 +1572,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
1565} 1572}
1566 1573
1567/** 1574/**
1568 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 1575 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
1569 * @phba: pointer to lpfc hba data structure. 1576 * @phba: pointer to lpfc hba data structure.
1570 * @mboxq: pointer to mailbox object. 1577 * @mboxq: pointer to mailbox object.
1571 * @next_fcf_index: pointer to holder of next fcf index. 1578 * @next_fcf_index: pointer to holder of next fcf index.
@@ -1693,6 +1700,37 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1693} 1700}
1694 1701
1695/** 1702/**
1703 lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
1704 * @phba: pointer to lpfc hba data structure.
1705 * @fcf_rec: pointer to an existing FCF record.
1706 * @new_fcf_record: pointer to a new FCF record.
1707 * @new_vlan_id: vlan id from the new FCF record.
1708 *
1709 * This function performs matching test of a new FCF record against an existing
1710 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
1711 * will not be used as part of the FCF record matching criteria.
1712 *
1713 * Returns true if all the fields matching, otherwise returns false.
1714 */
1715static bool
1716lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1717 struct lpfc_fcf_rec *fcf_rec,
1718 struct fcf_record *new_fcf_record,
1719 uint16_t new_vlan_id)
1720{
1721 if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
1722 if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
1723 return false;
1724 if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
1725 return false;
1726 if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
1727 return false;
1728 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
1729 return false;
1730 return true;
1731}
1732
1733/**
1696 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 1734 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1697 * @phba: pointer to lpfc hba data structure. 1735 * @phba: pointer to lpfc hba data structure.
1698 * @mboxq: pointer to mailbox object. 1736 * @mboxq: pointer to mailbox object.
@@ -1755,7 +1793,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1755 */ 1793 */
1756 if (!rc) { 1794 if (!rc) {
1757 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1795 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1758 "2781 FCF record fcf_index:x%x failed FCF " 1796 "2781 FCF record (x%x) failed FCF "
1759 "connection list check, fcf_avail:x%x, " 1797 "connection list check, fcf_avail:x%x, "
1760 "fcf_valid:x%x\n", 1798 "fcf_valid:x%x\n",
1761 bf_get(lpfc_fcf_record_fcf_index, 1799 bf_get(lpfc_fcf_record_fcf_index,
@@ -1764,6 +1802,32 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1764 new_fcf_record), 1802 new_fcf_record),
1765 bf_get(lpfc_fcf_record_fcf_valid, 1803 bf_get(lpfc_fcf_record_fcf_valid,
1766 new_fcf_record)); 1804 new_fcf_record));
1805 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
1806 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
1807 new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
1808 /*
1809 * In case the current in-use FCF record becomes
1810 * invalid/unavailable during FCF discovery that
1811 * was not triggered by fast FCF failover process,
1812 * treat it as fast FCF failover.
1813 */
1814 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
1815 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
1816 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1817 "2835 Invalid in-use FCF "
1818 "record (x%x) reported, "
1819 "entering fast FCF failover "
1820 "mode scanning.\n",
1821 phba->fcf.current_rec.fcf_indx);
1822 spin_lock_irq(&phba->hbalock);
1823 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
1824 spin_unlock_irq(&phba->hbalock);
1825 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1826 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
1827 LPFC_FCOE_FCF_GET_FIRST);
1828 return;
1829 }
1830 }
1767 goto read_next_fcf; 1831 goto read_next_fcf;
1768 } else { 1832 } else {
1769 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1833 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
@@ -1780,14 +1844,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1780 */ 1844 */
1781 spin_lock_irq(&phba->hbalock); 1845 spin_lock_irq(&phba->hbalock);
1782 if (phba->fcf.fcf_flag & FCF_IN_USE) { 1846 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1783 if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, 1847 if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
1784 new_fcf_record) && 1848 new_fcf_record, vlan_id)) {
1785 lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
1786 new_fcf_record) &&
1787 lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
1788 new_fcf_record) &&
1789 lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
1790 vlan_id)) {
1791 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1849 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1792 if (phba->fcf.fcf_flag & FCF_REDISC_PEND) 1850 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
1793 /* Stop FCF redisc wait timer if pending */ 1851 /* Stop FCF redisc wait timer if pending */
@@ -1797,6 +1855,13 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1797 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | 1855 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
1798 FCF_DISCOVERY); 1856 FCF_DISCOVERY);
1799 spin_unlock_irq(&phba->hbalock); 1857 spin_unlock_irq(&phba->hbalock);
1858 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1859 "2836 The new FCF record (x%x) "
1860 "matches the in-use FCF record "
1861 "(x%x)\n",
1862 phba->fcf.current_rec.fcf_indx,
1863 bf_get(lpfc_fcf_record_fcf_index,
1864 new_fcf_record));
1800 goto out; 1865 goto out;
1801 } 1866 }
1802 /* 1867 /*
@@ -1828,6 +1893,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1828 */ 1893 */
1829 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { 1894 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
1830 /* Choose this FCF record */ 1895 /* Choose this FCF record */
1896 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1897 "2837 Update current FCF record "
1898 "(x%x) with new FCF record (x%x)\n",
1899 fcf_rec->fcf_indx,
1900 bf_get(lpfc_fcf_record_fcf_index,
1901 new_fcf_record));
1831 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1902 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1832 addr_mode, vlan_id, BOOT_ENABLE); 1903 addr_mode, vlan_id, BOOT_ENABLE);
1833 spin_unlock_irq(&phba->hbalock); 1904 spin_unlock_irq(&phba->hbalock);
@@ -1848,6 +1919,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1848 */ 1919 */
1849 if (new_fcf_record->fip_priority < fcf_rec->priority) { 1920 if (new_fcf_record->fip_priority < fcf_rec->priority) {
1850 /* Choose the new FCF record with lower priority */ 1921 /* Choose the new FCF record with lower priority */
1922 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1923 "2838 Update current FCF record "
1924 "(x%x) with new FCF record (x%x)\n",
1925 fcf_rec->fcf_indx,
1926 bf_get(lpfc_fcf_record_fcf_index,
1927 new_fcf_record));
1851 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1928 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1852 addr_mode, vlan_id, 0); 1929 addr_mode, vlan_id, 0);
1853 /* Reset running random FCF selection count */ 1930 /* Reset running random FCF selection count */
@@ -1857,11 +1934,18 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1857 phba->fcf.eligible_fcf_cnt++; 1934 phba->fcf.eligible_fcf_cnt++;
1858 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, 1935 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
1859 phba->fcf.eligible_fcf_cnt); 1936 phba->fcf.eligible_fcf_cnt);
1860 if (select_new_fcf) 1937 if (select_new_fcf) {
1938 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1939 "2839 Update current FCF record "
1940 "(x%x) with new FCF record (x%x)\n",
1941 fcf_rec->fcf_indx,
1942 bf_get(lpfc_fcf_record_fcf_index,
1943 new_fcf_record));
1861 /* Choose the new FCF by random selection */ 1944 /* Choose the new FCF by random selection */
1862 __lpfc_update_fcf_record(phba, fcf_rec, 1945 __lpfc_update_fcf_record(phba, fcf_rec,
1863 new_fcf_record, 1946 new_fcf_record,
1864 addr_mode, vlan_id, 0); 1947 addr_mode, vlan_id, 0);
1948 }
1865 } 1949 }
1866 spin_unlock_irq(&phba->hbalock); 1950 spin_unlock_irq(&phba->hbalock);
1867 goto read_next_fcf; 1951 goto read_next_fcf;
@@ -1871,6 +1955,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1871 * initial best-fit FCF. 1955 * initial best-fit FCF.
1872 */ 1956 */
1873 if (fcf_rec) { 1957 if (fcf_rec) {
1958 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1959 "2840 Update current FCF record "
1960 "with initial FCF record (x%x)\n",
1961 bf_get(lpfc_fcf_record_fcf_index,
1962 new_fcf_record));
1874 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1963 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1875 addr_mode, vlan_id, (boot_flag ? 1964 addr_mode, vlan_id, (boot_flag ?
1876 BOOT_ENABLE : 0)); 1965 BOOT_ENABLE : 0));
@@ -1928,12 +2017,23 @@ read_next_fcf:
1928 lpfc_unregister_fcf(phba); 2017 lpfc_unregister_fcf(phba);
1929 2018
1930 /* Replace in-use record with the new record */ 2019 /* Replace in-use record with the new record */
2020 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2021 "2842 Replace the current in-use "
2022 "FCF record (x%x) with failover FCF "
2023 "record (x%x)\n",
2024 phba->fcf.current_rec.fcf_indx,
2025 phba->fcf.failover_rec.fcf_indx);
1931 memcpy(&phba->fcf.current_rec, 2026 memcpy(&phba->fcf.current_rec,
1932 &phba->fcf.failover_rec, 2027 &phba->fcf.failover_rec,
1933 sizeof(struct lpfc_fcf_rec)); 2028 sizeof(struct lpfc_fcf_rec));
1934 /* mark the FCF fast failover completed */ 2029 /*
2030 * Mark the fast FCF failover rediscovery completed
2031 * and the start of the first round of the roundrobin
2032 * FCF failover.
2033 */
1935 spin_lock_irq(&phba->hbalock); 2034 spin_lock_irq(&phba->hbalock);
1936 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2035 phba->fcf.fcf_flag &=
2036 ~(FCF_REDISC_FOV | FCF_REDISC_RRU);
1937 spin_unlock_irq(&phba->hbalock); 2037 spin_unlock_irq(&phba->hbalock);
1938 /* 2038 /*
1939 * Set up the initial registered FCF index for FLOGI 2039 * Set up the initial registered FCF index for FLOGI
@@ -1951,15 +2051,42 @@ read_next_fcf:
1951 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || 2051 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
1952 (phba->fcf.fcf_flag & FCF_REDISC_PEND)) 2052 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
1953 return; 2053 return;
2054
2055 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2056 /*
2057 * In case the current in-use FCF record no
2058 * longer existed during FCF discovery that
2059 * was not triggered by fast FCF failover
2060 * process, treat it as fast FCF failover.
2061 */
2062 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2063 "2841 In-use FCF record (x%x) "
2064 "not reported, entering fast "
2065 "FCF failover mode scanning.\n",
2066 phba->fcf.current_rec.fcf_indx);
2067 spin_lock_irq(&phba->hbalock);
2068 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2069 spin_unlock_irq(&phba->hbalock);
2070 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2071 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2072 LPFC_FCOE_FCF_GET_FIRST);
2073 return;
2074 }
2075
1954 /* 2076 /*
1955 * Otherwise, initial scan or post linkdown rescan, 2077 * Otherwise, initial scan or post linkdown rescan,
1956 * register with the best FCF record found so far 2078 * register with the best FCF record found so far
1957 * through the FCF scanning process. 2079 * through the FCF scanning process.
1958 */ 2080 */
1959 2081
1960 /* mark the initial FCF discovery completed */ 2082 /*
2083 * Mark the initial FCF discovery completed and
2084 * the start of the first round of the roundrobin
2085 * FCF failover.
2086 */
1961 spin_lock_irq(&phba->hbalock); 2087 spin_lock_irq(&phba->hbalock);
1962 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 2088 phba->fcf.fcf_flag &=
2089 ~(FCF_INIT_DISC | FCF_REDISC_RRU);
1963 spin_unlock_irq(&phba->hbalock); 2090 spin_unlock_irq(&phba->hbalock);
1964 /* 2091 /*
1965 * Set up the initial registered FCF index for FLOGI 2092 * Set up the initial registered FCF index for FLOGI
@@ -2033,6 +2160,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2033 next_fcf_index); 2160 next_fcf_index);
2034 2161
2035 /* Upload new FCF record to the failover FCF record */ 2162 /* Upload new FCF record to the failover FCF record */
2163 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2164 "2834 Update the current FCF record (x%x) "
2165 "with the next FCF record (x%x)\n",
2166 phba->fcf.failover_rec.fcf_indx,
2167 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2036 spin_lock_irq(&phba->hbalock); 2168 spin_lock_irq(&phba->hbalock);
2037 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 2169 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2038 new_fcf_record, addr_mode, vlan_id, 2170 new_fcf_record, addr_mode, vlan_id,
@@ -2050,7 +2182,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2050 2182
2051 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2183 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2052 "2783 FLOGI round robin FCF failover from FCF " 2184 "2783 FLOGI round robin FCF failover from FCF "
2053 "(index:x%x) to FCF (index:x%x).\n", 2185 "(x%x) to FCF (x%x).\n",
2054 current_fcf_index, 2186 current_fcf_index,
2055 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); 2187 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2056 2188
@@ -2084,7 +2216,7 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2084 goto out; 2216 goto out;
2085 2217
2086 /* If FCF discovery period is over, no need to proceed */ 2218 /* If FCF discovery period is over, no need to proceed */
2087 if (phba->fcf.fcf_flag & FCF_DISCOVERY) 2219 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2088 goto out; 2220 goto out;
2089 2221
2090 /* Parse the FCF record from the non-embedded mailbox command */ 2222 /* Parse the FCF record from the non-embedded mailbox command */
@@ -2715,11 +2847,35 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2715 struct lpfc_vport *vport = pmb->vport; 2847 struct lpfc_vport *vport = pmb->vport;
2716 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2848 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2717 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2849 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2850 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2718 2851
2719 pmb->context1 = NULL; 2852 pmb->context1 = NULL;
2720 2853
2721 /* Good status, call state machine */ 2854 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
2722 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); 2855 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
2856
2857 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
2858 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
2859 /* We rcvd a rscn after issuing this
2860 * mbox reg login, we may have cycled
2861 * back through the state and be
2862 * back at reg login state so this
2863 * mbox needs to be ignored becase
2864 * there is another reg login in
2865 * proccess.
2866 */
2867 spin_lock_irq(shost->host_lock);
2868 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
2869 spin_unlock_irq(shost->host_lock);
2870 if (phba->sli_rev == LPFC_SLI_REV4)
2871 lpfc_sli4_free_rpi(phba,
2872 pmb->u.mb.un.varRegLogin.rpi);
2873
2874 } else
2875 /* Good status, call state machine */
2876 lpfc_disc_state_machine(vport, ndlp, pmb,
2877 NLP_EVT_CMPL_REG_LOGIN);
2878
2723 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2879 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2724 kfree(mp); 2880 kfree(mp);
2725 mempool_free(pmb, phba->mbox_mem_pool); 2881 mempool_free(pmb, phba->mbox_mem_pool);
@@ -3427,7 +3583,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3427 kref_init(&ndlp->kref); 3583 kref_init(&ndlp->kref);
3428 NLP_INT_NODE_ACT(ndlp); 3584 NLP_INT_NODE_ACT(ndlp);
3429 atomic_set(&ndlp->cmd_pending, 0); 3585 atomic_set(&ndlp->cmd_pending, 0);
3430 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; 3586 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3431} 3587}
3432 3588
3433struct lpfc_nodelist * 3589struct lpfc_nodelist *
@@ -3700,6 +3856,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3700 mempool_free(mbox, phba->mbox_mem_pool); 3856 mempool_free(mbox, phba->mbox_mem_pool);
3701 } 3857 }
3702 lpfc_no_rpi(phba, ndlp); 3858 lpfc_no_rpi(phba, ndlp);
3859
3703 ndlp->nlp_rpi = 0; 3860 ndlp->nlp_rpi = 0;
3704 ndlp->nlp_flag &= ~NLP_RPI_VALID; 3861 ndlp->nlp_flag &= ~NLP_RPI_VALID;
3705 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3862 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
@@ -3842,6 +3999,9 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3842 kfree(mp); 3999 kfree(mp);
3843 } 4000 }
3844 list_del(&mb->list); 4001 list_del(&mb->list);
4002 if (phba->sli_rev == LPFC_SLI_REV4)
4003 lpfc_sli4_free_rpi(phba,
4004 mb->u.mb.un.varRegLogin.rpi);
3845 mempool_free(mb, phba->mbox_mem_pool); 4005 mempool_free(mb, phba->mbox_mem_pool);
3846 /* We shall not invoke the lpfc_nlp_put to decrement 4006 /* We shall not invoke the lpfc_nlp_put to decrement
3847 * the ndlp reference count as we are in the process 4007 * the ndlp reference count as we are in the process
@@ -3883,6 +4043,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3883 4043
3884 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4044 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3885 if ((ndlp->nlp_flag & NLP_DEFER_RM) && 4045 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4046 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
3886 !(ndlp->nlp_flag & NLP_RPI_VALID)) { 4047 !(ndlp->nlp_flag & NLP_RPI_VALID)) {
3887 /* For this case we need to cleanup the default rpi 4048 /* For this case we need to cleanup the default rpi
3888 * allocated by the firmware. 4049 * allocated by the firmware.
@@ -5180,13 +5341,16 @@ void
5180lpfc_unregister_unused_fcf(struct lpfc_hba *phba) 5341lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
5181{ 5342{
5182 /* 5343 /*
5183 * If HBA is not running in FIP mode or if HBA does not support 5344 * If HBA is not running in FIP mode, if HBA does not support
5184 * FCoE or if FCF is not registered, do nothing. 5345 * FCoE, if FCF discovery is ongoing, or if FCF has not been
5346 * registered, do nothing.
5185 */ 5347 */
5186 spin_lock_irq(&phba->hbalock); 5348 spin_lock_irq(&phba->hbalock);
5187 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || 5349 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
5188 !(phba->fcf.fcf_flag & FCF_REGISTERED) || 5350 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
5189 !(phba->hba_flag & HBA_FIP_SUPPORT)) { 5351 !(phba->hba_flag & HBA_FIP_SUPPORT) ||
5352 (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
5353 (phba->pport->port_state == LPFC_FLOGI)) {
5190 spin_unlock_irq(&phba->hbalock); 5354 spin_unlock_irq(&phba->hbalock);
5191 return; 5355 return;
5192 } 5356 }
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index e654d01dad24..f5dbf2be3eab 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1170,6 +1170,7 @@ typedef struct {
1170#define PCI_DEVICE_ID_TIGERSHARK 0x0704 1170#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1171#define PCI_DEVICE_ID_TOMCAT 0x0714 1171#define PCI_DEVICE_ID_TOMCAT 0x0714
1172#define PCI_DEVICE_ID_FALCON 0xf180 1172#define PCI_DEVICE_ID_FALCON 0xf180
1173#define PCI_DEVICE_ID_BALIUS 0xe131
1173 1174
1174#define JEDEC_ID_ADDRESS 0x0080001c 1175#define JEDEC_ID_ADDRESS 0x0080001c
1175#define FIREFLY_JEDEC_ID 0x1ACC 1176#define FIREFLY_JEDEC_ID 0x1ACC
@@ -3014,18 +3015,10 @@ struct sli3_pgp {
3014 uint32_t hbq_get[16]; 3015 uint32_t hbq_get[16];
3015}; 3016};
3016 3017
3017struct sli3_inb_pgp {
3018 uint32_t ha_copy;
3019 uint32_t counter;
3020 struct lpfc_pgp port[MAX_RINGS];
3021 uint32_t hbq_get[16];
3022};
3023
3024union sli_var { 3018union sli_var {
3025 struct sli2_desc s2; 3019 struct sli2_desc s2;
3026 struct sli3_desc s3; 3020 struct sli3_desc s3;
3027 struct sli3_pgp s3_pgp; 3021 struct sli3_pgp s3_pgp;
3028 struct sli3_inb_pgp s3_inb_pgp;
3029}; 3022};
3030 3023
3031typedef struct { 3024typedef struct {
@@ -3132,6 +3125,14 @@ typedef struct {
3132#define IOERR_BUFFER_SHORTAGE 0x28 3125#define IOERR_BUFFER_SHORTAGE 0x28
3133#define IOERR_DEFAULT 0x29 3126#define IOERR_DEFAULT 0x29
3134#define IOERR_CNT 0x2A 3127#define IOERR_CNT 0x2A
3128#define IOERR_SLER_FAILURE 0x46
3129#define IOERR_SLER_CMD_RCV_FAILURE 0x47
3130#define IOERR_SLER_REC_RJT_ERR 0x48
3131#define IOERR_SLER_REC_SRR_RETRY_ERR 0x49
3132#define IOERR_SLER_SRR_RJT_ERR 0x4A
3133#define IOERR_SLER_RRQ_RJT_ERR 0x4C
3134#define IOERR_SLER_RRQ_RETRY_ERR 0x4D
3135#define IOERR_SLER_ABTS_ERR 0x4E
3135 3136
3136#define IOERR_DRVR_MASK 0x100 3137#define IOERR_DRVR_MASK 0x100
3137#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */ 3138#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index cd9697edf860..2786ee3b605d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -621,6 +621,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
621/** 621/**
622 * lpfc_hba_init_link - Initialize the FC link 622 * lpfc_hba_init_link - Initialize the FC link
623 * @phba: pointer to lpfc hba data structure. 623 * @phba: pointer to lpfc hba data structure.
624 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
624 * 625 *
625 * This routine will issue the INIT_LINK mailbox command call. 626 * This routine will issue the INIT_LINK mailbox command call.
626 * It is available to other drivers through the lpfc_hba data 627 * It is available to other drivers through the lpfc_hba data
@@ -632,7 +633,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
632 * Any other value - error 633 * Any other value - error
633 **/ 634 **/
634int 635int
635lpfc_hba_init_link(struct lpfc_hba *phba) 636lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
636{ 637{
637 struct lpfc_vport *vport = phba->pport; 638 struct lpfc_vport *vport = phba->pport;
638 LPFC_MBOXQ_t *pmb; 639 LPFC_MBOXQ_t *pmb;
@@ -651,7 +652,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
651 phba->cfg_link_speed); 652 phba->cfg_link_speed);
652 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 653 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
653 lpfc_set_loopback_flag(phba); 654 lpfc_set_loopback_flag(phba);
654 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 655 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
655 if (rc != MBX_SUCCESS) { 656 if (rc != MBX_SUCCESS) {
656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
657 "0498 Adapter failed to init, mbxCmd x%x " 658 "0498 Adapter failed to init, mbxCmd x%x "
@@ -664,17 +665,21 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
664 writel(0xffffffff, phba->HAregaddr); 665 writel(0xffffffff, phba->HAregaddr);
665 readl(phba->HAregaddr); /* flush */ 666 readl(phba->HAregaddr); /* flush */
666 phba->link_state = LPFC_HBA_ERROR; 667 phba->link_state = LPFC_HBA_ERROR;
667 if (rc != MBX_BUSY) 668 if (rc != MBX_BUSY || flag == MBX_POLL)
668 mempool_free(pmb, phba->mbox_mem_pool); 669 mempool_free(pmb, phba->mbox_mem_pool);
669 return -EIO; 670 return -EIO;
670 } 671 }
671 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 672 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
673 if (flag == MBX_POLL)
674 mempool_free(pmb, phba->mbox_mem_pool);
672 675
673 return 0; 676 return 0;
674} 677}
675 678
676/** 679/**
677 * lpfc_hba_down_link - this routine downs the FC link 680 * lpfc_hba_down_link - this routine downs the FC link
681 * @phba: pointer to lpfc hba data structure.
682 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
678 * 683 *
679 * This routine will issue the DOWN_LINK mailbox command call. 684 * This routine will issue the DOWN_LINK mailbox command call.
680 * It is available to other drivers through the lpfc_hba data 685 * It is available to other drivers through the lpfc_hba data
@@ -685,7 +690,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
685 * Any other value - error 690 * Any other value - error
686 **/ 691 **/
687int 692int
688lpfc_hba_down_link(struct lpfc_hba *phba) 693lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
689{ 694{
690 LPFC_MBOXQ_t *pmb; 695 LPFC_MBOXQ_t *pmb;
691 int rc; 696 int rc;
@@ -701,7 +706,7 @@ lpfc_hba_down_link(struct lpfc_hba *phba)
701 "0491 Adapter Link is disabled.\n"); 706 "0491 Adapter Link is disabled.\n");
702 lpfc_down_link(phba, pmb); 707 lpfc_down_link(phba, pmb);
703 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 708 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
704 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 709 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
705 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 710 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
706 lpfc_printf_log(phba, 711 lpfc_printf_log(phba,
707 KERN_ERR, LOG_INIT, 712 KERN_ERR, LOG_INIT,
@@ -711,6 +716,9 @@ lpfc_hba_down_link(struct lpfc_hba *phba)
711 mempool_free(pmb, phba->mbox_mem_pool); 716 mempool_free(pmb, phba->mbox_mem_pool);
712 return -EIO; 717 return -EIO;
713 } 718 }
719 if (flag == MBX_POLL)
720 mempool_free(pmb, phba->mbox_mem_pool);
721
714 return 0; 722 return 0;
715} 723}
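The MBX_POLL plumbing above changes who owns the mailbox memory: under MBX_NOWAIT the completion handler (or the mailbox queue, on MBX_BUSY) frees the command, while under MBX_POLL no handler ever runs, so the caller must free it on both the success and the failure path. A minimal userspace sketch of that ownership rule, with invented names standing in for the driver's mailbox API:

/* mbox_ownership.c - who frees the mailbox under POLL vs NOWAIT */
#include <stdio.h>
#include <stdlib.h>

enum { MBX_NOWAIT, MBX_POLL };
enum { MBX_SUCCESS, MBX_BUSY, MBX_ERROR };

/* Under NOWAIT the "completion handler" owns and frees the command. */
static void def_mbox_cmpl(void *pmb)
{
        free(pmb);
}

static int issue_mbox(void *pmb, int flag)
{
        if (flag == MBX_NOWAIT)
                def_mbox_cmpl(pmb);     /* fires asynchronously in reality */
        return MBX_SUCCESS;
}

static int hba_init_link(int flag)
{
        void *pmb = malloc(64);
        int rc;

        if (!pmb)
                return -12;                     /* -ENOMEM */
        rc = issue_mbox(pmb, flag);
        if (rc != MBX_SUCCESS) {
                /* on MBX_BUSY under NOWAIT the queue still owns pmb */
                if (rc != MBX_BUSY || flag == MBX_POLL)
                        free(pmb);              /* nobody else will */
                return -5;                      /* -EIO */
        }
        if (flag == MBX_POLL)
                free(pmb);      /* no completion handler under POLL */
        return 0;
}

int main(void)
{
        printf("poll: %d, nowait: %d\n",
               hba_init_link(MBX_POLL), hba_init_link(MBX_NOWAIT));
        return 0;
}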
716 724
@@ -1818,6 +1826,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1818 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1826 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1819 "EmulexSecure Fibre"}; 1827 "EmulexSecure Fibre"};
1820 break; 1828 break;
1829 case PCI_DEVICE_ID_BALIUS:
1830 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1831 "Fibre Channel Adapter"};
1832 break;
1821 default: 1833 default:
1822 m = (typeof(m)){"Unknown", "", ""}; 1834 m = (typeof(m)){"Unknown", "", ""};
1823 break; 1835 break;
@@ -2279,10 +2291,32 @@ static void
2279lpfc_block_mgmt_io(struct lpfc_hba * phba) 2291lpfc_block_mgmt_io(struct lpfc_hba * phba)
2280{ 2292{
2281 unsigned long iflag; 2293 unsigned long iflag;
2294 uint8_t actcmd = MBX_HEARTBEAT;
2295 unsigned long timeout;
2296
2282 2297
2283 spin_lock_irqsave(&phba->hbalock, iflag); 2298 spin_lock_irqsave(&phba->hbalock, iflag);
2284 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2299 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2300 if (phba->sli.mbox_active)
2301 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2285 spin_unlock_irqrestore(&phba->hbalock, iflag); 2302 spin_unlock_irqrestore(&phba->hbalock, iflag);
2303 /* Determine how long we might wait for the active mailbox
2304 * command to be gracefully completed by firmware.
2305 */
2306 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2307 jiffies;
 2308 /* Wait for the outstanding mailbox command to complete */
2309 while (phba->sli.mbox_active) {
2310 /* Check active mailbox complete status every 2ms */
2311 msleep(2);
2312 if (time_after(jiffies, timeout)) {
2313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2314 "2813 Mgmt IO is Blocked %x "
2315 "- mbox cmd %x still active\n",
2316 phba->sli.sli_flag, actcmd);
2317 break;
2318 }
2319 }
2286} 2320}
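lpfc_block_mgmt_io() now snapshots the active mailbox command under the lock and then polls in 2 ms steps, bounded by that command's own timeout, before declaring management I/O blocked. A standalone sketch of the bounded-poll idiom; the kernel's msleep()/time_after() are modeled with POSIX calls and the deadline value is illustrative:

/* bounded_poll.c - wait for a condition with a deadline */
#include <stdio.h>
#include <time.h>

static int mbox_active = 1;     /* stands in for phba->sli.mbox_active */

static void sleep_ms(long ms)
{
        struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

        nanosleep(&ts, NULL);
}

static double now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

int main(void)
{
        double timeout = now_ms() + 100.0;      /* illustrative deadline */
        int polls = 0;

        while (mbox_active) {
                sleep_ms(2);                    /* check every 2 ms */
                if (++polls == 10)
                        mbox_active = 0;        /* simulate completion */
                if (now_ms() > timeout) {
                        fprintf(stderr, "mbox still active, giving up\n");
                        break;
                }
        }
        printf("blocked mgmt I/O after %d polls\n", polls);
        return 0;
}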
2287 2321
2288/** 2322/**
@@ -3323,22 +3357,14 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3323 "evt_tag:x%x, fcf_index:x%x\n", 3357 "evt_tag:x%x, fcf_index:x%x\n",
3324 acqe_fcoe->event_tag, 3358 acqe_fcoe->event_tag,
3325 acqe_fcoe->index); 3359 acqe_fcoe->index);
3360 /* If the FCF discovery is in progress, do nothing. */
3326 spin_lock_irq(&phba->hbalock); 3361 spin_lock_irq(&phba->hbalock);
3327 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || 3362 if (phba->hba_flag & FCF_DISC_INPROGRESS) {
3328 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3329 /*
3330 * If the current FCF is in discovered state or
3331 * FCF discovery is in progress, do nothing.
3332 */
3333 spin_unlock_irq(&phba->hbalock); 3363 spin_unlock_irq(&phba->hbalock);
3334 break; 3364 break;
3335 } 3365 }
3336 3366 /* If fast FCF failover rescan event is pending, do nothing */
3337 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3367 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3338 /*
3339 * If fast FCF failover rescan event is pending,
3340 * do nothing.
3341 */
3342 spin_unlock_irq(&phba->hbalock); 3368 spin_unlock_irq(&phba->hbalock);
3343 break; 3369 break;
3344 } 3370 }
@@ -3359,7 +3385,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3359 acqe_fcoe->index); 3385 acqe_fcoe->index);
3360 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3386 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3361 } 3387 }
 3362 3388 /* If the FCF is already in discovered state, do nothing. */
3389 spin_lock_irq(&phba->hbalock);
3390 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3391 spin_unlock_irq(&phba->hbalock);
3392 break;
3393 }
3394 spin_unlock_irq(&phba->hbalock);
3363 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3395 /* Otherwise, scan the entire FCF table and re-discover SAN */
3364 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3396 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3365 "2770 Start FCF table scan due to new FCF " 3397 "2770 Start FCF table scan due to new FCF "
@@ -4885,6 +4917,7 @@ lpfc_create_shost(struct lpfc_hba *phba)
4885 phba->fc_altov = FF_DEF_ALTOV; 4917 phba->fc_altov = FF_DEF_ALTOV;
4886 phba->fc_arbtov = FF_DEF_ARBTOV; 4918 phba->fc_arbtov = FF_DEF_ARBTOV;
4887 4919
4920 atomic_set(&phba->sdev_cnt, 0);
4888 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 4921 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4889 if (!vport) 4922 if (!vport)
4890 return -ENODEV; 4923 return -ENODEV;
@@ -5533,9 +5566,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5533 mempool_free(pmb, phba->mbox_mem_pool); 5566 mempool_free(pmb, phba->mbox_mem_pool);
5534 5567
5535 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 5568 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5536 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri)) 5569 if (phba->cfg_hba_queue_depth >
5570 (phba->sli4_hba.max_cfg_param.max_xri -
5571 lpfc_sli4_get_els_iocb_cnt(phba)))
5537 phba->cfg_hba_queue_depth = 5572 phba->cfg_hba_queue_depth =
5538 phba->sli4_hba.max_cfg_param.max_xri; 5573 phba->sli4_hba.max_cfg_param.max_xri -
5574 lpfc_sli4_get_els_iocb_cnt(phba);
5539 return rc; 5575 return rc;
5540} 5576}
5541 5577
@@ -6993,22 +7029,28 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
6993static int 7029static int
6994lpfc_sli4_enable_msix(struct lpfc_hba *phba) 7030lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6995{ 7031{
6996 int rc, index; 7032 int vectors, rc, index;
6997 7033
6998 /* Set up MSI-X multi-message vectors */ 7034 /* Set up MSI-X multi-message vectors */
6999 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 7035 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7000 phba->sli4_hba.msix_entries[index].entry = index; 7036 phba->sli4_hba.msix_entries[index].entry = index;
7001 7037
7002 /* Configure MSI-X capability structure */ 7038 /* Configure MSI-X capability structure */
7039 vectors = phba->sli4_hba.cfg_eqn;
7040enable_msix_vectors:
7003 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 7041 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7004 phba->sli4_hba.cfg_eqn); 7042 vectors);
7005 if (rc) { 7043 if (rc > 1) {
7044 vectors = rc;
7045 goto enable_msix_vectors;
7046 } else if (rc) {
7006 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7047 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7007 "0484 PCI enable MSI-X failed (%d)\n", rc); 7048 "0484 PCI enable MSI-X failed (%d)\n", rc);
7008 goto msi_fail_out; 7049 goto msi_fail_out;
7009 } 7050 }
7051
7010 /* Log MSI-X vector assignment */ 7052 /* Log MSI-X vector assignment */
7011 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 7053 for (index = 0; index < vectors; index++)
7012 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7054 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7013 "0489 MSI-X entry[%d]: vector=x%x " 7055 "0489 MSI-X entry[%d]: vector=x%x "
7014 "message=%d\n", index, 7056 "message=%d\n", index,
@@ -7030,7 +7072,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7030 } 7072 }
7031 7073
7032 /* The rest of the vector(s) are associated to fast-path handler(s) */ 7074 /* The rest of the vector(s) are associated to fast-path handler(s) */
7033 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) { 7075 for (index = 1; index < vectors; index++) {
7034 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 7076 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7035 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 7077 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7036 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 7078 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
@@ -7044,6 +7086,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7044 goto cfg_fail_out; 7086 goto cfg_fail_out;
7045 } 7087 }
7046 } 7088 }
7089 phba->sli4_hba.msix_vec_nr = vectors;
7047 7090
7048 return rc; 7091 return rc;
7049 7092
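pci_enable_msix(), as it existed at this point, returns a positive count when fewer vectors are available than requested; the hunk above retries with that count instead of failing outright, keeping at least two vectors (one slow-path plus one fast-path). The negotiation loop, sketched against a mocked enable call:

/* msix_retry.c - retry MSI-X enable with the advertised vector count */
#include <stdio.h>

#define AVAILABLE_VECTORS 3     /* what the "hardware" can grant */

static int mock_pci_enable_msix(int requested)
{
        if (requested > AVAILABLE_VECTORS)
                return AVAILABLE_VECTORS;       /* fewer vectors available */
        return 0;                               /* success */
}

int main(void)
{
        int vectors = 8;                        /* initial request */
        int rc;

        for (;;) {
                rc = mock_pci_enable_msix(vectors);
                if (rc > 1) {                   /* need at least 2 vectors */
                        vectors = rc;           /* retry with what we can get */
                        continue;
                }
                break;
        }
        if (rc)
                fprintf(stderr, "enable MSI-X failed (%d)\n", rc);
        else
                printf("enabled %d MSI-X vectors\n", vectors);
        return 0;
}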
@@ -7077,9 +7120,10 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7077 /* Free up MSI-X multi-message vectors */ 7120 /* Free up MSI-X multi-message vectors */
7078 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7121 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7079 7122
7080 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) 7123 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7081 free_irq(phba->sli4_hba.msix_entries[index].vector, 7124 free_irq(phba->sli4_hba.msix_entries[index].vector,
7082 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7125 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7126
7083 /* Disable MSI-X */ 7127 /* Disable MSI-X */
7084 pci_disable_msix(phba->pcidev); 7128 pci_disable_msix(phba->pcidev);
7085 7129
@@ -7121,6 +7165,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7121 pci_disable_msi(phba->pcidev); 7165 pci_disable_msi(phba->pcidev);
7122 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7166 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7123 "0490 MSI request_irq failed (%d)\n", rc); 7167 "0490 MSI request_irq failed (%d)\n", rc);
7168 return rc;
7124 } 7169 }
7125 7170
7126 for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 7171 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
@@ -7128,7 +7173,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7128 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7173 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7129 } 7174 }
7130 7175
7131 return rc; 7176 return 0;
7132} 7177}
7133 7178
7134/** 7179/**
@@ -7839,6 +7884,9 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7840 "2710 PCI channel disable preparing for reset\n"); 7885 "2710 PCI channel disable preparing for reset\n");
7841 7886
7887 /* Block any management I/Os to the device */
7888 lpfc_block_mgmt_io(phba);
7889
7842 /* Block all SCSI devices' I/Os on the host */ 7890 /* Block all SCSI devices' I/Os on the host */
7843 lpfc_scsi_dev_block(phba); 7891 lpfc_scsi_dev_block(phba);
7844 7892
@@ -7848,6 +7896,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7848 /* Disable interrupt and pci device */ 7896 /* Disable interrupt and pci device */
7849 lpfc_sli_disable_intr(phba); 7897 lpfc_sli_disable_intr(phba);
7850 pci_disable_device(phba->pcidev); 7898 pci_disable_device(phba->pcidev);
7899
7851 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 7900 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
7852 lpfc_sli_flush_fcp_rings(phba); 7901 lpfc_sli_flush_fcp_rings(phba);
7853} 7902}
@@ -7861,7 +7910,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7861 * pending I/Os. 7910 * pending I/Os.
7862 **/ 7911 **/
7863static void 7912static void
7864lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) 7913lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7865{ 7914{
7866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7915 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7867 "2711 PCI channel permanent disable for failure\n"); 7916 "2711 PCI channel permanent disable for failure\n");
@@ -7910,7 +7959,7 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7910 return PCI_ERS_RESULT_NEED_RESET; 7959 return PCI_ERS_RESULT_NEED_RESET;
7911 case pci_channel_io_perm_failure: 7960 case pci_channel_io_perm_failure:
7912 /* Permanent failure, prepare for device down */ 7961 /* Permanent failure, prepare for device down */
7913 lpfc_prep_dev_for_perm_failure(phba); 7962 lpfc_sli_prep_dev_for_perm_failure(phba);
7914 return PCI_ERS_RESULT_DISCONNECT; 7963 return PCI_ERS_RESULT_DISCONNECT;
7915 default: 7964 default:
7916 /* Unknown state, prepare and request slot reset */ 7965 /* Unknown state, prepare and request slot reset */
@@ -7979,7 +8028,8 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7979 } else 8028 } else
7980 phba->intr_mode = intr_mode; 8029 phba->intr_mode = intr_mode;
7981 8030
 7982 /* Take device offline; this will perform cleanup */ 8031 /* Take device offline; it will perform cleanup */
8032 lpfc_offline_prep(phba);
7983 lpfc_offline(phba); 8033 lpfc_offline(phba);
7984 lpfc_sli_brdrestart(phba); 8034 lpfc_sli_brdrestart(phba);
7985 8035
@@ -8110,8 +8160,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8110 } 8160 }
8111 8161
8112 /* Initialize and populate the iocb list per host */ 8162 /* Initialize and populate the iocb list per host */
8113 error = lpfc_init_iocb_list(phba, 8163
8114 phba->sli4_hba.max_cfg_param.max_xri); 8164 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8165 "2821 initialize iocb list %d.\n",
8166 phba->cfg_iocb_cnt*1024);
8167 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
8168
8115 if (error) { 8169 if (error) {
8116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8117 "1413 Failed to initialize iocb list.\n"); 8171 "1413 Failed to initialize iocb list.\n");
@@ -8160,6 +8214,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8160 /* Default to single FCP EQ for non-MSI-X */ 8214 /* Default to single FCP EQ for non-MSI-X */
8161 if (phba->intr_type != MSIX) 8215 if (phba->intr_type != MSIX)
8162 phba->cfg_fcp_eq_count = 1; 8216 phba->cfg_fcp_eq_count = 1;
8217 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
8218 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
8163 /* Set up SLI-4 HBA */ 8219 /* Set up SLI-4 HBA */
8164 if (lpfc_sli4_hba_setup(phba)) { 8220 if (lpfc_sli4_hba_setup(phba)) {
8165 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8321,7 +8377,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8321 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8377 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8322 8378
8323 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8379 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8324 "0298 PCI device Power Management suspend.\n"); 8380 "2843 PCI device Power Management suspend.\n");
8325 8381
8326 /* Bring down the device */ 8382 /* Bring down the device */
8327 lpfc_offline_prep(phba); 8383 lpfc_offline_prep(phba);
@@ -8412,6 +8468,84 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8412} 8468}
8413 8469
8414/** 8470/**
 8471 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for PCI slot recovery
8472 * @phba: pointer to lpfc hba data structure.
8473 *
 8474 * This routine is called to prepare the SLI4 device for PCI slot recovery. It
 8475 * aborts all the outstanding SCSI I/Os to the PCI device.
8476 **/
8477static void
8478lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
8479{
8480 struct lpfc_sli *psli = &phba->sli;
8481 struct lpfc_sli_ring *pring;
8482
8483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8484 "2828 PCI channel I/O abort preparing for recovery\n");
8485 /*
 8486 * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
 8487 * and let the SCSI mid-layer retry them to recover.
8488 */
8489 pring = &psli->ring[psli->fcp_ring];
8490 lpfc_sli_abort_iocb_ring(phba, pring);
8491}
8492
8493/**
 8494 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for PCI slot reset
8495 * @phba: pointer to lpfc hba data structure.
8496 *
8497 * This routine is called to prepare the SLI4 device for PCI slot reset. It
8498 * disables the device interrupt and pci device, and aborts the internal FCP
8499 * pending I/Os.
8500 **/
8501static void
8502lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
8503{
8504 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8505 "2826 PCI channel disable preparing for reset\n");
8506
8507 /* Block any management I/Os to the device */
8508 lpfc_block_mgmt_io(phba);
8509
8510 /* Block all SCSI devices' I/Os on the host */
8511 lpfc_scsi_dev_block(phba);
8512
 8513 /* Stop all timers */
8514 lpfc_stop_hba_timers(phba);
8515
8516 /* Disable interrupt and pci device */
8517 lpfc_sli4_disable_intr(phba);
8518 pci_disable_device(phba->pcidev);
8519
8520 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
8521 lpfc_sli_flush_fcp_rings(phba);
8522}
8523
8524/**
 8525 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for PCI slot disable
8526 * @phba: pointer to lpfc hba data structure.
8527 *
 8528 * This routine is called to prepare the SLI4 device for permanently disabling
 8529 * the PCI slot. It blocks the SCSI transport layer traffic and flushes the FCP
8530 * pending I/Os.
8531 **/
8532static void
8533lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8534{
8535 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8536 "2827 PCI channel permanent disable for failure\n");
8537
8538 /* Block all SCSI devices' I/Os on the host */
8539 lpfc_scsi_dev_block(phba);
8540
 8541 /* Stop all timers */
8542 lpfc_stop_hba_timers(phba);
8543
8544 /* Clean up all driver's outstanding SCSI I/Os */
8545 lpfc_sli_flush_fcp_rings(phba);
8546}
8547
8548/**
8415 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 8549 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
8416 * @pdev: pointer to PCI device. 8550 * @pdev: pointer to PCI device.
8417 * @state: the current PCI connection state. 8551 * @state: the current PCI connection state.
@@ -8430,7 +8564,29 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8430static pci_ers_result_t 8564static pci_ers_result_t
8431lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 8565lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8432{ 8566{
8433 return PCI_ERS_RESULT_NEED_RESET; 8567 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8568 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8569
8570 switch (state) {
8571 case pci_channel_io_normal:
8572 /* Non-fatal error, prepare for recovery */
8573 lpfc_sli4_prep_dev_for_recover(phba);
8574 return PCI_ERS_RESULT_CAN_RECOVER;
8575 case pci_channel_io_frozen:
8576 /* Fatal error, prepare for slot reset */
8577 lpfc_sli4_prep_dev_for_reset(phba);
8578 return PCI_ERS_RESULT_NEED_RESET;
8579 case pci_channel_io_perm_failure:
8580 /* Permanent failure, prepare for device down */
8581 lpfc_sli4_prep_dev_for_perm_failure(phba);
8582 return PCI_ERS_RESULT_DISCONNECT;
8583 default:
8584 /* Unknown state, prepare and request slot reset */
8585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8586 "2825 Unknown PCI error state: x%x\n", state);
8587 lpfc_sli4_prep_dev_for_reset(phba);
8588 return PCI_ERS_RESULT_NEED_RESET;
8589 }
8434} 8590}
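This gives the SLI-4 path the same three-way EEH dispatch the SLI-3 path already had: each pci_channel_state selects a matching preparation routine and a verdict for the PCI core. A self-contained sketch of the mapping; the enum names here are stand-ins for the PCI error-recovery API, redefined so the example compiles alone:

/* eeh_dispatch.c - map channel states to recovery verdicts */
#include <stdio.h>

enum channel_state { IO_NORMAL, IO_FROZEN, IO_PERM_FAILURE };
enum ers_result { CAN_RECOVER, NEED_RESET, DISCONNECT };

static enum ers_result error_detected(enum channel_state state)
{
        switch (state) {
        case IO_NORMAL:         /* non-fatal: abort in-flight I/O, recover */
                return CAN_RECOVER;
        case IO_FROZEN:         /* fatal: quiesce and request slot reset */
                return NEED_RESET;
        case IO_PERM_FAILURE:   /* dead slot: take the device down */
                return DISCONNECT;
        default:                /* unknown: safest is a slot reset */
                return NEED_RESET;
        }
}

int main(void)
{
        printf("frozen -> %d (NEED_RESET)\n", error_detected(IO_FROZEN));
        return 0;
}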
8435 8591
8436/** 8592/**
@@ -8454,6 +8610,39 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8454static pci_ers_result_t 8610static pci_ers_result_t
8455lpfc_io_slot_reset_s4(struct pci_dev *pdev) 8611lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8456{ 8612{
8613 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8614 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8615 struct lpfc_sli *psli = &phba->sli;
8616 uint32_t intr_mode;
8617
8618 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8619 if (pci_enable_device_mem(pdev)) {
8620 printk(KERN_ERR "lpfc: Cannot re-enable "
8621 "PCI device after reset.\n");
8622 return PCI_ERS_RESULT_DISCONNECT;
8623 }
8624
8625 pci_restore_state(pdev);
8626 if (pdev->is_busmaster)
8627 pci_set_master(pdev);
8628
8629 spin_lock_irq(&phba->hbalock);
8630 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8631 spin_unlock_irq(&phba->hbalock);
8632
8633 /* Configure and enable interrupt */
8634 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8635 if (intr_mode == LPFC_INTR_ERROR) {
8636 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8637 "2824 Cannot re-enable interrupt after "
8638 "slot reset.\n");
8639 return PCI_ERS_RESULT_DISCONNECT;
8640 } else
8641 phba->intr_mode = intr_mode;
8642
8643 /* Log the current active interrupt mode */
8644 lpfc_log_intr_mode(phba, phba->intr_mode);
8645
8457 return PCI_ERS_RESULT_RECOVERED; 8646 return PCI_ERS_RESULT_RECOVERED;
8458} 8647}
8459 8648
@@ -8470,7 +8659,27 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8470static void 8659static void
8471lpfc_io_resume_s4(struct pci_dev *pdev) 8660lpfc_io_resume_s4(struct pci_dev *pdev)
8472{ 8661{
8473 return; 8662 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8663 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8664
8665 /*
 8666 * In case of slot reset, since the function reset is performed
 8667 * through a mailbox command, which needs DMA to be enabled, this
 8668 * operation has to be done in the I/O resume phase. Taking the
 8669 * device offline will perform the necessary cleanup.
8670 */
8671 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
8672 /* Perform device reset */
8673 lpfc_offline_prep(phba);
8674 lpfc_offline(phba);
8675 lpfc_sli_brdrestart(phba);
8676 /* Bring the device back online */
8677 lpfc_online(phba);
8678 }
8679
8680 /* Clean up Advanced Error Reporting (AER) if needed */
8681 if (phba->hba_flag & HBA_AER_ENABLED)
8682 pci_cleanup_aer_uncorrect_error_status(pdev);
8474} 8683}
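The design choice here is worth spelling out: the function reset is issued via a mailbox command, which needs DMA, so the slot-reset handler only clears LPFC_SLI_ACTIVE and the actual offline/restart/online cycle is deferred to io_resume. A toy model of that deferred restart, with an illustrative flag in place of the driver's sli_flag:

/* deferred_restart.c - restart deferred until I/O is resumed */
#include <stdio.h>

#define SLI_ACTIVE 0x1

static unsigned int sli_flag;   /* cleared by the slot-reset path */

static void io_resume(void)
{
        if (!(sli_flag & SLI_ACTIVE)) {
                /* offline -> board restart -> online, as in the driver */
                printf("performing deferred device reset\n");
                sli_flag |= SLI_ACTIVE;
        }
}

int main(void)
{
        io_resume();            /* first resume after slot reset */
        io_resume();            /* no-op once SLI is active again */
        return 0;
}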
8475 8684
8476/** 8685/**
@@ -8802,6 +9011,8 @@ static struct pci_device_id lpfc_id_table[] = {
8802 PCI_ANY_ID, PCI_ANY_ID, }, 9011 PCI_ANY_ID, PCI_ANY_ID, },
8803 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 9012 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
8804 PCI_ANY_ID, PCI_ANY_ID, }, 9013 PCI_ANY_ID, PCI_ANY_ID, },
9014 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
9015 PCI_ANY_ID, PCI_ANY_ID, },
8805 { 0 } 9016 { 0 }
8806}; 9017};
8807 9018
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e84dc33ca201..9c2c7c7140c7 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -955,6 +955,26 @@ lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
955 return; 955 return;
956} 956}
957 957
958void
959lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
960{
961 MAILBOX_t *mb = &pmb->u.mb;
962 struct lpfc_mqe *mqe;
963
964 switch (mb->mbxCommand) {
965 case MBX_READ_REV:
966 mqe = &pmb->u.mqe;
967 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
968 mqe->un.read_rev.fw_name, 16);
969 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
970 mqe->un.read_rev.ulp_fw_name, 16);
971 break;
972 default:
973 break;
974 }
975 return;
976}
977
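lpfc_sli4_swap_str() leans on lpfc_sli_pcimem_bcopy() converting 32-bit words between endiannesses, so copying the firmware-name buffer onto itself byte-swaps each word in place. A standalone equivalent of that in-place word swap, assuming a 16-byte, word-aligned buffer (the helper name is invented):

/* swap_str.c - in-place 32-bit word byte swap of a name buffer */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void swap_words_in_place(void *buf, size_t len)
{
        uint32_t w;
        size_t i;

        for (i = 0; i + 4 <= len; i += 4) {
                memcpy(&w, (char *)buf + i, 4);
                w = (w >> 24) | ((w >> 8) & 0xff00) |
                    ((w << 8) & 0xff0000) | (w << 24);
                memcpy((char *)buf + i, &w, 4);
        }
}

int main(void)
{
        char fw_name[16] = "1.20.4-emulex";

        swap_words_in_place(fw_name, sizeof(fw_name));
        swap_words_in_place(fw_name, sizeof(fw_name)); /* round trip */
        printf("%s\n", fw_name);        /* prints the original string */
        return 0;
}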
958/** 978/**
959 * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2 979 * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2
960 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. 980 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
@@ -1199,7 +1219,6 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1199 mb->un.varCfgPort.cdss = 1; /* Configure Security */ 1219 mb->un.varCfgPort.cdss = 1; /* Configure Security */
1200 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1220 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1201 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1221 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1202 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
1203 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1222 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1204 if (phba->max_vpi && phba->cfg_enable_npiv && 1223 if (phba->max_vpi && phba->cfg_enable_npiv &&
1205 phba->vpd.sli3Feat.cmv) { 1224 phba->vpd.sli3Feat.cmv) {
@@ -2026,7 +2045,7 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2026 phba->fcf.current_rec.fcf_indx); 2045 phba->fcf.current_rec.fcf_indx);
2027 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ 2046 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
2028 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3); 2047 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
2029 if (phba->fcf.current_rec.vlan_id != 0xFFFF) { 2048 if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
2030 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); 2049 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
2031 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, 2050 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2032 phba->fcf.current_rec.vlan_id); 2051 phba->fcf.current_rec.vlan_id);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b90820a699fd..bccc9c66fa37 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -190,6 +190,7 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
190} 190}
191 191
192 192
193
193/* 194/*
194 * Free resources / clean up outstanding I/Os 195 * Free resources / clean up outstanding I/Os
195 * associated with a LPFC_NODELIST entry. This 196 * associated with a LPFC_NODELIST entry. This
@@ -199,13 +200,15 @@ int
199lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 200lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
200{ 201{
201 LIST_HEAD(completions); 202 LIST_HEAD(completions);
203 LIST_HEAD(txcmplq_completions);
204 LIST_HEAD(abort_list);
202 struct lpfc_sli *psli = &phba->sli; 205 struct lpfc_sli *psli = &phba->sli;
203 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 206 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
204 struct lpfc_iocbq *iocb, *next_iocb; 207 struct lpfc_iocbq *iocb, *next_iocb;
205 208
206 /* Abort outstanding I/O on NPort <nlp_DID> */ 209 /* Abort outstanding I/O on NPort <nlp_DID> */
207 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, 210 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
208 "0205 Abort outstanding I/O on NPort x%x " 211 "2819 Abort outstanding I/O on NPort x%x "
209 "Data: x%x x%x x%x\n", 212 "Data: x%x x%x x%x\n",
210 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 213 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
211 ndlp->nlp_rpi); 214 ndlp->nlp_rpi);
@@ -224,14 +227,25 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
224 } 227 }
225 228
226 /* Next check the txcmplq */ 229 /* Next check the txcmplq */
227 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 230 list_splice_init(&pring->txcmplq, &txcmplq_completions);
231 spin_unlock_irq(&phba->hbalock);
232
233 list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
228 /* Check to see if iocb matches the nport we are looking for */ 234 /* Check to see if iocb matches the nport we are looking for */
229 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { 235 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
230 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 236 list_add_tail(&iocb->dlist, &abort_list);
231 }
232 } 237 }
238 spin_lock_irq(&phba->hbalock);
239 list_splice(&txcmplq_completions, &pring->txcmplq);
233 spin_unlock_irq(&phba->hbalock); 240 spin_unlock_irq(&phba->hbalock);
234 241
242 list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
243 spin_lock_irq(&phba->hbalock);
244 list_del_init(&iocb->dlist);
245 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
246 spin_unlock_irq(&phba->hbalock);
247 }
248
235 /* Cancel all the IOCBs from the completions list */ 249 /* Cancel all the IOCBs from the completions list */
236 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 250 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
237 IOERR_SLI_ABORTED); 251 IOERR_SLI_ABORTED);
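The rewritten lpfc_els_abort() no longer issues aborts while holding hbalock for the whole txcmplq walk: it splices the queue out under the lock, scans it unlocked, collects matches on a second link (the driver's dlist), splices the queue back, and only then aborts each match with the lock retaken per entry. A userspace sketch of the pattern, using a pthread mutex, a singly linked list, and an invented match rule (compile with -lpthread):

/* abort_splice.c - scan a locked queue without holding its lock */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct iocb {
        int did;                /* owning NPort, illustrative */
        struct iocb *next;      /* queue link (like iocb->list) */
        struct iocb *dnext;     /* abort link (like iocb->dlist) */
};

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static struct iocb *txcmplq;

int main(void)
{
        struct iocb *spliced, *abort_list = NULL, *i;
        int n, target = 7;

        for (n = 0; n < 5; n++) {               /* build a fake txcmplq */
                i = malloc(sizeof(*i));
                i->did = (n % 2) ? 7 : 3;
                i->next = txcmplq;
                txcmplq = i;
        }

        pthread_mutex_lock(&hbalock);
        spliced = txcmplq;                      /* list_splice_init(...) */
        txcmplq = NULL;
        pthread_mutex_unlock(&hbalock);

        for (i = spliced; i; i = i->next)       /* scan without the lock */
                if (i->did == target) {
                        i->dnext = abort_list;
                        abort_list = i;
                }

        pthread_mutex_lock(&hbalock);
        txcmplq = spliced;                      /* list_splice(...) back */
        pthread_mutex_unlock(&hbalock);

        for (i = abort_list; i; i = i->dnext) {
                pthread_mutex_lock(&hbalock);   /* abort one entry at a time */
                printf("abort iotag for DID x%x\n", i->did);
                pthread_mutex_unlock(&hbalock);
        }

        while ((i = txcmplq)) {                 /* teardown for the sketch */
                txcmplq = i->next;
                free(i);
        }
        return 0;
}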
@@ -626,7 +640,8 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
626 if (!(vport->fc_flag & FC_PT2PT)) { 640 if (!(vport->fc_flag & FC_PT2PT)) {
627 /* Check config parameter use-adisc or FCP-2 */ 641 /* Check config parameter use-adisc or FCP-2 */
628 if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || 642 if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
629 ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 643 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
644 (ndlp->nlp_type & NLP_FCP_TARGET))) {
630 spin_lock_irq(shost->host_lock); 645 spin_lock_irq(shost->host_lock);
631 ndlp->nlp_flag |= NLP_NPR_ADISC; 646 ndlp->nlp_flag |= NLP_NPR_ADISC;
632 spin_unlock_irq(shost->host_lock); 647 spin_unlock_irq(shost->host_lock);
@@ -962,6 +977,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
962 mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; 977 mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
963 break; 978 break;
964 default: 979 default:
980 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
965 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 981 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
966 } 982 }
967 mbox->context2 = lpfc_nlp_get(ndlp); 983 mbox->context2 = lpfc_nlp_get(ndlp);
@@ -972,6 +988,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
972 NLP_STE_REG_LOGIN_ISSUE); 988 NLP_STE_REG_LOGIN_ISSUE);
973 return ndlp->nlp_state; 989 return ndlp->nlp_state;
974 } 990 }
991 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
992 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
975 /* decrement node reference count to the failed mbox 993 /* decrement node reference count to the failed mbox
976 * command 994 * command
977 */ 995 */
@@ -1458,6 +1476,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1458 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1476 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1459 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1477 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1460 spin_lock_irq(shost->host_lock); 1478 spin_lock_irq(shost->host_lock);
1479 ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
1461 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1480 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1462 spin_unlock_irq(shost->host_lock); 1481 spin_unlock_irq(shost->host_lock);
1463 lpfc_disc_set_adisc(vport, ndlp); 1482 lpfc_disc_set_adisc(vport, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f4a3b2e79eea..c818a7255962 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -623,6 +623,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
623 unsigned long iflag = 0; 623 unsigned long iflag = 0;
624 struct lpfc_iocbq *iocbq; 624 struct lpfc_iocbq *iocbq;
625 int i; 625 int i;
626 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
626 627
627 spin_lock_irqsave(&phba->hbalock, iflag); 628 spin_lock_irqsave(&phba->hbalock, iflag);
628 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 629 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
@@ -651,6 +652,8 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
651 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 652 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
652 psb->exch_busy = 0; 653 psb->exch_busy = 0;
653 spin_unlock_irqrestore(&phba->hbalock, iflag); 654 spin_unlock_irqrestore(&phba->hbalock, iflag);
655 if (pring->txq_cnt)
656 lpfc_worker_wake_up(phba);
654 return; 657 return;
655 658
656 } 659 }
@@ -747,7 +750,6 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
747 int status = 0, index; 750 int status = 0, index;
748 int bcnt; 751 int bcnt;
749 int non_sequential_xri = 0; 752 int non_sequential_xri = 0;
750 int rc = 0;
751 LIST_HEAD(sblist); 753 LIST_HEAD(sblist);
752 754
753 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 755 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
@@ -774,6 +776,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
774 /* Allocate iotag for psb->cur_iocbq. */ 776 /* Allocate iotag for psb->cur_iocbq. */
775 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 777 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
776 if (iotag == 0) { 778 if (iotag == 0) {
779 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
780 psb->data, psb->dma_handle);
777 kfree(psb); 781 kfree(psb);
778 break; 782 break;
779 } 783 }
@@ -858,7 +862,6 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
858 if (status) { 862 if (status) {
859 /* Put this back on the abort scsi list */ 863 /* Put this back on the abort scsi list */
860 psb->exch_busy = 1; 864 psb->exch_busy = 1;
861 rc++;
862 } else { 865 } else {
863 psb->exch_busy = 0; 866 psb->exch_busy = 0;
864 psb->status = IOSTAT_SUCCESS; 867 psb->status = IOSTAT_SUCCESS;
@@ -877,7 +880,6 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
877 if (status) { 880 if (status) {
878 /* Put this back on the abort scsi list */ 881 /* Put this back on the abort scsi list */
879 psb->exch_busy = 1; 882 psb->exch_busy = 1;
880 rc++;
881 } else { 883 } else {
882 psb->exch_busy = 0; 884 psb->exch_busy = 0;
883 psb->status = IOSTAT_SUCCESS; 885 psb->status = IOSTAT_SUCCESS;
@@ -887,7 +889,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
887 } 889 }
888 } 890 }
889 891
890 return bcnt + non_sequential_xri - rc; 892 return bcnt + non_sequential_xri;
891} 893}
892 894
893/** 895/**
@@ -1323,6 +1325,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1323 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1325 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1324 pde5->reftag = reftag; 1326 pde5->reftag = reftag;
1325 1327
 1328 /* Endian conversion if necessary for PDE5 */
1329 pde5->word0 = cpu_to_le32(pde5->word0);
1330 pde5->reftag = cpu_to_le32(pde5->reftag);
1331
1326 /* advance bpl and increment bde count */ 1332 /* advance bpl and increment bde count */
1327 num_bde++; 1333 num_bde++;
1328 bpl++; 1334 bpl++;
@@ -1341,6 +1347,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1341 bf_set(pde6_ai, pde6, 1); 1347 bf_set(pde6_ai, pde6, 1);
1342 bf_set(pde6_apptagval, pde6, apptagval); 1348 bf_set(pde6_apptagval, pde6, apptagval);
1343 1349
 1350 /* Endian conversion if necessary for PDE6 */
1351 pde6->word0 = cpu_to_le32(pde6->word0);
1352 pde6->word1 = cpu_to_le32(pde6->word1);
1353 pde6->word2 = cpu_to_le32(pde6->word2);
1354
1344 /* advance bpl and increment bde count */ 1355 /* advance bpl and increment bde count */
1345 num_bde++; 1356 num_bde++;
1346 bpl++; 1357 bpl++;
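These hunks fix a BlockGuard portability issue: the PDE words are assembled with host-endian bit-field helpers and must be converted to little-endian before the adapter reads them. A minimal sketch using glibc's htole32() as the userspace stand-in for cpu_to_le32(); the word layout and type value are invented for the example:

/* pde_endian.c - convert descriptor words once, just before DMA */
#include <endian.h>
#include <stdio.h>
#include <stdint.h>

struct pde5 {
        uint32_t word0;         /* type and control bits */
        uint32_t reftag;        /* reference tag for the protection group */
};

int main(void)
{
        struct pde5 pde = { 0 };

        pde.word0 |= 0x85u << 24;       /* illustrative descriptor type */
        pde.reftag = 0x12345678;

        /* convert once, just before handing the BPL to the adapter */
        pde.word0 = htole32(pde.word0);
        pde.reftag = htole32(pde.reftag);
        printf("word0=%08x reftag=%08x\n", pde.word0, pde.reftag);
        return 0;
}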
@@ -1448,6 +1459,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1448 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1459 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1449 pde5->reftag = reftag; 1460 pde5->reftag = reftag;
1450 1461
 1462 /* Endian conversion if necessary for PDE5 */
1463 pde5->word0 = cpu_to_le32(pde5->word0);
1464 pde5->reftag = cpu_to_le32(pde5->reftag);
1465
1451 /* advance bpl and increment bde count */ 1466 /* advance bpl and increment bde count */
1452 num_bde++; 1467 num_bde++;
1453 bpl++; 1468 bpl++;
@@ -1464,6 +1479,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1464 bf_set(pde6_ai, pde6, 1); 1479 bf_set(pde6_ai, pde6, 1);
1465 bf_set(pde6_apptagval, pde6, apptagval); 1480 bf_set(pde6_apptagval, pde6, apptagval);
1466 1481
 1482 /* Endian conversion if necessary for PDE6 */
1483 pde6->word0 = cpu_to_le32(pde6->word0);
1484 pde6->word1 = cpu_to_le32(pde6->word1);
1485 pde6->word2 = cpu_to_le32(pde6->word2);
1486
1467 /* advance bpl and increment bde count */ 1487 /* advance bpl and increment bde count */
1468 num_bde++; 1488 num_bde++;
1469 bpl++; 1489 bpl++;
@@ -1475,7 +1495,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1475 prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); 1495 prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
1476 protgroup_len = sg_dma_len(sgpe); 1496 protgroup_len = sg_dma_len(sgpe);
1477 1497
1478
1479 /* must be integer multiple of the DIF block length */ 1498 /* must be integer multiple of the DIF block length */
1480 BUG_ON(protgroup_len % 8); 1499 BUG_ON(protgroup_len % 8);
1481 1500
@@ -2293,15 +2312,21 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2293 struct lpfc_vport *vport = pIocbIn->vport; 2312 struct lpfc_vport *vport = pIocbIn->vport;
2294 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 2313 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2295 struct lpfc_nodelist *pnode = rdata->pnode; 2314 struct lpfc_nodelist *pnode = rdata->pnode;
2296 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2315 struct scsi_cmnd *cmd;
2297 int result; 2316 int result;
2298 struct scsi_device *tmp_sdev; 2317 struct scsi_device *tmp_sdev;
2299 int depth; 2318 int depth;
2300 unsigned long flags; 2319 unsigned long flags;
2301 struct lpfc_fast_path_event *fast_path_evt; 2320 struct lpfc_fast_path_event *fast_path_evt;
2302 struct Scsi_Host *shost = cmd->device->host; 2321 struct Scsi_Host *shost;
2303 uint32_t queue_depth, scsi_id; 2322 uint32_t queue_depth, scsi_id;
2304 2323
2324 /* Sanity check on return of outstanding command */
2325 if (!(lpfc_cmd->pCmd))
2326 return;
2327 cmd = lpfc_cmd->pCmd;
2328 shost = cmd->device->host;
2329
2305 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 2330 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
2306 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 2331 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
 2307 /* pick up SLI4 exchange busy status from HBA */ 2332
@@ -2363,7 +2388,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2363 case IOSTAT_LOCAL_REJECT: 2388 case IOSTAT_LOCAL_REJECT:
2364 if (lpfc_cmd->result == IOERR_INVALID_RPI || 2389 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
2365 lpfc_cmd->result == IOERR_NO_RESOURCES || 2390 lpfc_cmd->result == IOERR_NO_RESOURCES ||
2366 lpfc_cmd->result == IOERR_ABORT_REQUESTED) { 2391 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
2392 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
2367 cmd->result = ScsiResult(DID_REQUEUE, 0); 2393 cmd->result = ScsiResult(DID_REQUEUE, 0);
2368 break; 2394 break;
2369 } 2395 }
@@ -2432,14 +2458,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2432 } 2458 }
2433 spin_unlock_irqrestore(shost->host_lock, flags); 2459 spin_unlock_irqrestore(shost->host_lock, flags);
2434 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) { 2460 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2435 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) && 2461 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
2436 time_after(jiffies, pnode->last_change_time + 2462 time_after(jiffies, pnode->last_change_time +
2437 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { 2463 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
2438 spin_lock_irqsave(shost->host_lock, flags); 2464 spin_lock_irqsave(shost->host_lock, flags);
2439 pnode->cmd_qdepth += pnode->cmd_qdepth * 2465 depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
2440 LPFC_TGTQ_RAMPUP_PCENT / 100; 2466 / 100;
2441 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH) 2467 depth = depth ? depth : 1;
2442 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; 2468 pnode->cmd_qdepth += depth;
2469 if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
2470 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
2443 pnode->last_change_time = jiffies; 2471 pnode->last_change_time = jiffies;
2444 spin_unlock_irqrestore(shost->host_lock, flags); 2472 spin_unlock_irqrestore(shost->host_lock, flags);
2445 } 2473 }
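The ramp-up fix matters because of integer truncation: 5 % of a queue depth below 20 rounds to zero, so once ramped down the depth could never grow again. Forcing a minimum step of 1 and clamping at the configured target, as a tiny worked example with illustrative values:

/* qdepth_ramp.c - integer percentage ramp with a minimum step of 1 */
#include <stdio.h>

int main(void)
{
        unsigned int qdepth = 10, cfg_tgt_queue_depth = 64, step;

        while (qdepth < cfg_tgt_queue_depth) {
                step = qdepth * 5 / 100;        /* LPFC_TGTQ_RAMPUP_PCENT */
                step = step ? step : 1;         /* never ramp by zero */
                qdepth += step;
                if (qdepth > cfg_tgt_queue_depth)
                        qdepth = cfg_tgt_queue_depth;
                printf("qdepth -> %u\n", qdepth);
        }
        return 0;
}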
@@ -2894,8 +2922,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2894 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); 2922 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2895 goto out_fail_command; 2923 goto out_fail_command;
2896 } 2924 }
2897 if (vport->cfg_max_scsicmpl_time && 2925 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
2898 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
2899 goto out_host_busy; 2926 goto out_host_busy;
2900 2927
2901 lpfc_cmd = lpfc_get_scsi_buf(phba); 2928 lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -3041,7 +3068,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3041 int ret = SUCCESS; 3068 int ret = SUCCESS;
3042 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 3069 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
3043 3070
3044 fc_block_scsi_eh(cmnd); 3071 ret = fc_block_scsi_eh(cmnd);
3072 if (ret)
3073 return ret;
3045 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 3074 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
3046 BUG_ON(!lpfc_cmd); 3075 BUG_ON(!lpfc_cmd);
3047 3076
@@ -3225,7 +3254,9 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3225 lpfc_taskmgmt_name(task_mgmt_cmd), 3254 lpfc_taskmgmt_name(task_mgmt_cmd),
3226 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, 3255 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3227 iocbqrsp->iocb.un.ulpWord[4]); 3256 iocbqrsp->iocb.un.ulpWord[4]);
3228 } else 3257 } else if (status == IOCB_BUSY)
3258 ret = FAILED;
3259 else
3229 ret = SUCCESS; 3260 ret = SUCCESS;
3230 3261
3231 lpfc_sli_release_iocbq(phba, iocbqrsp); 3262 lpfc_sli_release_iocbq(phba, iocbqrsp);
@@ -3357,7 +3388,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3357 return FAILED; 3388 return FAILED;
3358 } 3389 }
3359 pnode = rdata->pnode; 3390 pnode = rdata->pnode;
3360 fc_block_scsi_eh(cmnd); 3391 status = fc_block_scsi_eh(cmnd);
3392 if (status)
3393 return status;
3361 3394
3362 status = lpfc_chk_tgt_mapped(vport, cmnd); 3395 status = lpfc_chk_tgt_mapped(vport, cmnd);
3363 if (status == FAILED) { 3396 if (status == FAILED) {
@@ -3422,7 +3455,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3422 return FAILED; 3455 return FAILED;
3423 } 3456 }
3424 pnode = rdata->pnode; 3457 pnode = rdata->pnode;
3425 fc_block_scsi_eh(cmnd); 3458 status = fc_block_scsi_eh(cmnd);
3459 if (status)
3460 return status;
3426 3461
3427 status = lpfc_chk_tgt_mapped(vport, cmnd); 3462 status = lpfc_chk_tgt_mapped(vport, cmnd);
3428 if (status == FAILED) { 3463 if (status == FAILED) {
@@ -3488,7 +3523,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3488 fc_host_post_vendor_event(shost, fc_get_event_number(), 3523 fc_host_post_vendor_event(shost, fc_get_event_number(),
3489 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 3524 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3490 3525
3491 fc_block_scsi_eh(cmnd); 3526 ret = fc_block_scsi_eh(cmnd);
3527 if (ret)
3528 return ret;
3492 3529
3493 /* 3530 /*
3494 * Since the driver manages a single bus device, reset all 3531 * Since the driver manages a single bus device, reset all
@@ -3561,11 +3598,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
3561 uint32_t total = 0; 3598 uint32_t total = 0;
3562 uint32_t num_to_alloc = 0; 3599 uint32_t num_to_alloc = 0;
3563 int num_allocated = 0; 3600 int num_allocated = 0;
3601 uint32_t sdev_cnt;
3564 3602
3565 if (!rport || fc_remote_port_chkready(rport)) 3603 if (!rport || fc_remote_port_chkready(rport))
3566 return -ENXIO; 3604 return -ENXIO;
3567 3605
3568 sdev->hostdata = rport->dd_data; 3606 sdev->hostdata = rport->dd_data;
3607 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
3569 3608
3570 /* 3609 /*
3571 * Populate the cmds_per_lun count scsi_bufs into this host's globally 3610 * Populate the cmds_per_lun count scsi_bufs into this host's globally
@@ -3577,6 +3616,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
3577 total = phba->total_scsi_bufs; 3616 total = phba->total_scsi_bufs;
3578 num_to_alloc = vport->cfg_lun_queue_depth + 2; 3617 num_to_alloc = vport->cfg_lun_queue_depth + 2;
3579 3618
 3619 /* If enough buffers are already allocated, do nothing */
3620 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
3621 return 0;
3622
3580 /* Allow some exchanges to be available always to complete discovery */ 3623 /* Allow some exchanges to be available always to complete discovery */
3581 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 3624 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
3582 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3625 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
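The new sdev_cnt check short-circuits buffer allocation once the existing pool already covers every device seen so far, i.e. while sdev_cnt * (lun_queue_depth + 2) < total holds. A small numeric illustration of where that threshold kicks in; the constants are invented:

/* sdev_threshold.c - when does slave_alloc start allocating again? */
#include <stdio.h>

int main(void)
{
        unsigned int lun_queue_depth = 30;      /* cfg_lun_queue_depth */
        unsigned int total = 512;               /* buffers already pooled */
        unsigned int sdev_cnt;

        for (sdev_cnt = 1; sdev_cnt <= 20; sdev_cnt++)
                if (sdev_cnt * (lun_queue_depth + 2) >= total)
                        break;
        printf("start allocating again at device #%u\n", sdev_cnt);
        return 0;
}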
@@ -3658,6 +3701,9 @@ lpfc_slave_configure(struct scsi_device *sdev)
3658static void 3701static void
3659lpfc_slave_destroy(struct scsi_device *sdev) 3702lpfc_slave_destroy(struct scsi_device *sdev)
3660{ 3703{
3704 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3705 struct lpfc_hba *phba = vport->phba;
3706 atomic_dec(&phba->sdev_cnt);
3661 sdev->hostdata = NULL; 3707 sdev->hostdata = NULL;
3662 return; 3708 return;
3663} 3709}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7a61455140b6..e758eae0d0fd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -455,6 +455,11 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
455 struct lpfc_iocbq * iocbq = NULL; 455 struct lpfc_iocbq * iocbq = NULL;
456 456
457 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 457 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
458
459 if (iocbq)
460 phba->iocb_cnt++;
461 if (phba->iocb_cnt > phba->iocb_max)
462 phba->iocb_max = phba->iocb_cnt;
458 return iocbq; 463 return iocbq;
459} 464}
460 465
@@ -575,7 +580,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
575{ 580{
576 struct lpfc_sglq *sglq; 581 struct lpfc_sglq *sglq;
577 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 582 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
578 unsigned long iflag; 583 unsigned long iflag = 0;
584 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
579 585
580 if (iocbq->sli4_xritag == NO_XRI) 586 if (iocbq->sli4_xritag == NO_XRI)
581 sglq = NULL; 587 sglq = NULL;
@@ -593,6 +599,10 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
593 } else { 599 } else {
594 sglq->state = SGL_FREED; 600 sglq->state = SGL_FREED;
595 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 601 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
602
 603 /* Check if the txq needs to be serviced */
604 if (pring->txq_cnt)
605 lpfc_worker_wake_up(phba);
596 } 606 }
597 } 607 }
598 608
@@ -605,6 +615,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
605 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 615 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
606} 616}
607 617
618
608/** 619/**
609 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 620 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
610 * @phba: Pointer to HBA context object. 621 * @phba: Pointer to HBA context object.
@@ -642,6 +653,7 @@ static void
642__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 653__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
643{ 654{
644 phba->__lpfc_sli_release_iocbq(phba, iocbq); 655 phba->__lpfc_sli_release_iocbq(phba, iocbq);
656 phba->iocb_cnt--;
645} 657}
646 658
647/** 659/**
@@ -872,7 +884,11 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
872 struct lpfc_iocbq *piocb) 884 struct lpfc_iocbq *piocb)
873{ 885{
874 list_add_tail(&piocb->list, &pring->txcmplq); 886 list_add_tail(&piocb->list, &pring->txcmplq);
887 piocb->iocb_flag |= LPFC_IO_ON_Q;
875 pring->txcmplq_cnt++; 888 pring->txcmplq_cnt++;
889 if (pring->txcmplq_cnt > pring->txcmplq_max)
890 pring->txcmplq_max = pring->txcmplq_cnt;
891
876 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 892 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
877 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 893 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
878 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 894 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
@@ -897,7 +913,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
897 * the txq, the function returns first iocb in the list after 913 * the txq, the function returns first iocb in the list after
898 * removing the iocb from the list, else it returns NULL. 914 * removing the iocb from the list, else it returns NULL.
899 **/ 915 **/
900static struct lpfc_iocbq * 916struct lpfc_iocbq *
901lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 917lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
902{ 918{
903 struct lpfc_iocbq *cmd_iocb; 919 struct lpfc_iocbq *cmd_iocb;
@@ -2150,7 +2166,10 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2150 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2166 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2151 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2167 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2152 list_del_init(&cmd_iocb->list); 2168 list_del_init(&cmd_iocb->list);
2153 pring->txcmplq_cnt--; 2169 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2170 pring->txcmplq_cnt--;
2171 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2172 }
2154 return cmd_iocb; 2173 return cmd_iocb;
2155 } 2174 }
2156 2175
@@ -2183,7 +2202,10 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2183 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2202 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2184 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2203 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2185 list_del_init(&cmd_iocb->list); 2204 list_del_init(&cmd_iocb->list);
2186 pring->txcmplq_cnt--; 2205 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2206 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2207 pring->txcmplq_cnt--;
2208 }
2187 return cmd_iocb; 2209 return cmd_iocb;
2188 } 2210 }
2189 2211
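Both lookup paths now decrement txcmplq_cnt only when the iocb actually carries LPFC_IO_ON_Q, so looking the same iotag up twice can no longer drive the counter negative. The guard in isolation, with an invented flag value:

/* onq_guard.c - flag-gated counter decrement */
#include <stdio.h>

#define IO_ON_Q 0x1

struct iocbq {
        unsigned int flags;
};

static int txcmplq_cnt;

static void txcmplq_put(struct iocbq *io)
{
        io->flags |= IO_ON_Q;
        txcmplq_cnt++;
}

static void txcmplq_remove(struct iocbq *io)
{
        if (io->flags & IO_ON_Q) {
                io->flags &= ~IO_ON_Q;
                txcmplq_cnt--;
        }
}

int main(void)
{
        struct iocbq io = { 0 };

        txcmplq_put(&io);
        txcmplq_remove(&io);
        txcmplq_remove(&io);            /* second lookup: now a no-op */
        printf("txcmplq_cnt = %d\n", txcmplq_cnt);      /* 0, not -1 */
        return 0;
}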
@@ -3564,13 +3586,16 @@ static int
3564lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 3586lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3565{ 3587{
3566 struct lpfc_sli *psli = &phba->sli; 3588 struct lpfc_sli *psli = &phba->sli;
3567 3589 uint32_t hba_aer_enabled;
3568 3590
3569 /* Restart HBA */ 3591 /* Restart HBA */
3570 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3592 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3571 "0296 Restart HBA Data: x%x x%x\n", 3593 "0296 Restart HBA Data: x%x x%x\n",
3572 phba->pport->port_state, psli->sli_flag); 3594 phba->pport->port_state, psli->sli_flag);
3573 3595
 3596 /* Save the PCIe device Advanced Error Reporting (AER) state */
3597 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3598
3574 lpfc_sli4_brdreset(phba); 3599 lpfc_sli4_brdreset(phba);
3575 3600
3576 spin_lock_irq(&phba->hbalock); 3601 spin_lock_irq(&phba->hbalock);
@@ -3582,6 +3607,10 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3582 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3607 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3583 psli->stats_start = get_seconds(); 3608 psli->stats_start = get_seconds();
3584 3609
 3610 /* Disable AER if it was enabled; the reset above cleared hba_flag */
3611 if (hba_aer_enabled)
3612 pci_disable_pcie_error_reporting(phba->pcidev);
3613
3585 lpfc_hba_down_post(phba); 3614 lpfc_hba_down_post(phba);
3586 3615
3587 return 0; 3616 return 0;
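lpfc_sli4_brdreset() clears hba_flag, so the restart path samples HBA_AER_ENABLED first and then disables device-side AER to keep the hardware state in step with the cleared flag; the setup path shown later re-arms it when cfg_aer_support is set. A hedged sketch of that save-then-resync idea; do_hard_reset() and MY_AER_ENABLED are illustrative, only the pci_*_pcie_error_reporting() calls are the real API of this era:

#include <linux/pci.h>
#include <linux/aer.h>

#define MY_AER_ENABLED 0x1000		/* illustrative flag bit */

extern void do_hard_reset(struct pci_dev *pdev);	/* hypothetical */

static void reset_preserving_aer(struct pci_dev *pdev, unsigned long *hba_flag)
{
	unsigned long aer_was_on = *hba_flag & MY_AER_ENABLED;

	do_hard_reset(pdev);		/* hypothetical; clears *hba_flag */

	/* The flag is gone, so bring the device side back in sync too;
	 * a later configuration pass may re-enable both together.
	 */
	if (aer_was_on)
		pci_disable_pcie_error_reporting(pdev);
}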
@@ -3794,7 +3823,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
3794 3823
3795 phba->link_state = LPFC_HBA_ERROR; 3824 phba->link_state = LPFC_HBA_ERROR;
3796 mempool_free(pmb, phba->mbox_mem_pool); 3825 mempool_free(pmb, phba->mbox_mem_pool);
3797 return ENXIO; 3826 return -ENXIO;
3798 } 3827 }
3799 } 3828 }
3800 phba->hbq_count = hbq_count; 3829 phba->hbq_count = hbq_count;
@@ -3885,7 +3914,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3885 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 3914 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
3886 LPFC_SLI3_HBQ_ENABLED | 3915 LPFC_SLI3_HBQ_ENABLED |
3887 LPFC_SLI3_CRP_ENABLED | 3916 LPFC_SLI3_CRP_ENABLED |
3888 LPFC_SLI3_INB_ENABLED |
3889 LPFC_SLI3_BG_ENABLED); 3917 LPFC_SLI3_BG_ENABLED);
3890 if (rc != MBX_SUCCESS) { 3918 if (rc != MBX_SUCCESS) {
3891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3919 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3927,20 +3955,9 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3927 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 3955 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3928 if (pmb->u.mb.un.varCfgPort.gcrp) 3956 if (pmb->u.mb.un.varCfgPort.gcrp)
3929 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3957 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3930 if (pmb->u.mb.un.varCfgPort.ginb) { 3958
3931 phba->sli3_options |= LPFC_SLI3_INB_ENABLED; 3959 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
3932 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; 3960 phba->port_gp = phba->mbox->us.s3_pgp.port;
3933 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
3934 phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
3935 phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
3936 phba->inb_last_counter =
3937 phba->mbox->us.s3_inb_pgp.counter;
3938 } else {
3939 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
3940 phba->port_gp = phba->mbox->us.s3_pgp.port;
3941 phba->inb_ha_copy = NULL;
3942 phba->inb_counter = NULL;
3943 }
3944 3961
3945 if (phba->cfg_enable_bg) { 3962 if (phba->cfg_enable_bg) {
3946 if (pmb->u.mb.un.varCfgPort.gbg) 3963 if (pmb->u.mb.un.varCfgPort.gbg)
@@ -3953,8 +3970,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3953 } else { 3970 } else {
3954 phba->hbq_get = NULL; 3971 phba->hbq_get = NULL;
3955 phba->port_gp = phba->mbox->us.s2.port; 3972 phba->port_gp = phba->mbox->us.s2.port;
3956 phba->inb_ha_copy = NULL;
3957 phba->inb_counter = NULL;
3958 phba->max_vpi = 0; 3973 phba->max_vpi = 0;
3959 } 3974 }
3960do_prep_failed: 3975do_prep_failed:
@@ -4214,7 +4229,8 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4214 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4229 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4215 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4230 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4216 4231
4217 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size); 4232 memcpy(vpd, dmabuf->virt, *vpd_size);
4233
4218 dma_free_coherent(&phba->pcidev->dev, dma_size, 4234 dma_free_coherent(&phba->pcidev->dev, dma_size,
4219 dmabuf->virt, dmabuf->phys); 4235 dmabuf->virt, dmabuf->phys);
4220 kfree(dmabuf); 4236 kfree(dmabuf);
@@ -4539,6 +4555,24 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4539 /* Start error attention (ERATT) polling timer */ 4555 /* Start error attention (ERATT) polling timer */
4540 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 4556 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4541 4557
4558 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4559 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4560 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4561 if (!rc) {
4562 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4563 "2829 This device supports "
4564 "Advanced Error Reporting (AER)\n");
4565 spin_lock_irq(&phba->hbalock);
4566 phba->hba_flag |= HBA_AER_ENABLED;
4567 spin_unlock_irq(&phba->hbalock);
4568 } else {
4569 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4570 "2830 This device does not support "
4571 "Advanced Error Reporting (AER)\n");
4572 phba->cfg_aer_support = 0;
4573 }
4574 }
4575
4542 /* 4576 /*
4543 * The port is ready, set the host's link state to LINK_DOWN 4577 * The port is ready, set the host's link state to LINK_DOWN
4544 * in preparation for link interrupts. 4578 * in preparation for link interrupts.
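The setup path arms AER only when the cfg_aer_support knob asks for it, and clears the knob again on failure so the user-visible value reflects what the device actually supports. A generic sketch of that try-then-report-back shape; the knob and flag names are illustrative:

#include <linux/pci.h>
#include <linux/aer.h>

#define MY_AER_ENABLED 0x1000		/* illustrative flag bit */

static void try_enable_aer(struct pci_dev *pdev, unsigned int *cfg_aer,
			   unsigned long *hba_flag)
{
	if (*cfg_aer != 1 || (*hba_flag & MY_AER_ENABLED))
		return;

	if (!pci_enable_pcie_error_reporting(pdev))
		*hba_flag |= MY_AER_ENABLED;	/* remember success */
	else
		*cfg_aer = 0;	/* reflect lack of support to the user */
}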
@@ -5265,7 +5299,8 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5265 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 5299 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5266 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); 5300 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5267 rc = MBXERR_ERROR; 5301 rc = MBXERR_ERROR;
5268 } 5302 } else
5303 lpfc_sli4_swap_str(phba, mboxq);
5269 5304
5270 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5305 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5271 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " 5306 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
@@ -5592,7 +5627,7 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5592 * iocb to the txq when SLI layer cannot submit the command iocb 5627 * iocb to the txq when SLI layer cannot submit the command iocb
5593 * to the ring. 5628 * to the ring.
5594 **/ 5629 **/
5595static void 5630void
5596__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5631__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5597 struct lpfc_iocbq *piocb) 5632 struct lpfc_iocbq *piocb)
5598{ 5633{
@@ -6209,7 +6244,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6209 struct lpfc_iocbq *piocb, uint32_t flag) 6244 struct lpfc_iocbq *piocb, uint32_t flag)
6210{ 6245{
6211 struct lpfc_sglq *sglq; 6246 struct lpfc_sglq *sglq;
6212 uint16_t xritag;
6213 union lpfc_wqe wqe; 6247 union lpfc_wqe wqe;
6214 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 6248 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6215 6249
@@ -6218,10 +6252,26 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6218 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6252 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6219 sglq = NULL; 6253 sglq = NULL;
6220 else { 6254 else {
6255 if (pring->txq_cnt) {
6256 if (!(flag & SLI_IOCB_RET_IOCB)) {
6257 __lpfc_sli_ringtx_put(phba,
6258 pring, piocb);
6259 return IOCB_SUCCESS;
6260 } else {
6261 return IOCB_BUSY;
6262 }
6263 } else {
6221 sglq = __lpfc_sli_get_sglq(phba); 6264 sglq = __lpfc_sli_get_sglq(phba);
6222 if (!sglq) 6265 if (!sglq) {
6223 return IOCB_ERROR; 6266 if (!(flag & SLI_IOCB_RET_IOCB)) {
6224 piocb->sli4_xritag = sglq->sli4_xritag; 6267 __lpfc_sli_ringtx_put(phba,
6268 pring,
6269 piocb);
6270 return IOCB_SUCCESS;
6271 } else
6272 return IOCB_BUSY;
6273 }
6274 }
6225 } 6275 }
6226 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 6276 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6227 sglq = NULL; /* These IO's already have an XRI and 6277 sglq = NULL; /* These IO's already have an XRI and
@@ -6237,8 +6287,9 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6237 } 6287 }
6238 6288
6239 if (sglq) { 6289 if (sglq) {
6240 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq); 6290 piocb->sli4_xritag = sglq->sli4_xritag;
6241 if (xritag != sglq->sli4_xritag) 6291
6292 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
6242 return IOCB_ERROR; 6293 return IOCB_ERROR;
6243 } 6294 }
6244 6295
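Taken together, the s4 issue path now defers an ELS iocb to the txq both when the txq is already non-empty (preserving FIFO order) and when no SGL can be allocated, returning IOCB_SUCCESS for the deferral unless the caller passed SLI_IOCB_RET_IOCB to get IOCB_BUSY back instead. A compressed sketch of that decision; every name below is illustrative, not lpfc's:

#include <linux/types.h>

enum { SKETCH_SUCCESS, SKETCH_BUSY };
#define RET_TO_CALLER 0x1

struct sketch_io;
struct sketch_ring { unsigned int txq_cnt; };

extern bool claim_resource(struct sketch_ring *r, struct sketch_io *io);
extern void txq_put(struct sketch_ring *r, struct sketch_io *io);
extern int submit(struct sketch_ring *r, struct sketch_io *io);

static int issue_or_defer(struct sketch_ring *r, struct sketch_io *io,
			  unsigned int flag)
{
	/* A non-empty txq means older work goes first; with no free
	 * resource the new work cannot go at all right now.
	 */
	if (r->txq_cnt || !claim_resource(r, io)) {
		if (flag & RET_TO_CALLER)
			return SKETCH_BUSY;	/* caller retries later */
		txq_put(r, io);			/* parked until drained */
		return SKETCH_SUCCESS;
	}
	return submit(r, io);
}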
@@ -6278,7 +6329,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6278 * IOCB_SUCCESS - Success 6329 * IOCB_SUCCESS - Success
6279 * IOCB_BUSY - Busy 6330 * IOCB_BUSY - Busy
6280 **/ 6331 **/
6281static inline int 6332int
6282__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 6333__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6283 struct lpfc_iocbq *piocb, uint32_t flag) 6334 struct lpfc_iocbq *piocb, uint32_t flag)
6284{ 6335{
@@ -7095,13 +7146,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7095 */ 7146 */
7096 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 7147 abort_iocb = phba->sli.iocbq_lookup[abort_context];
7097 7148
7098 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
7099 "0327 Cannot abort els iocb %p "
7100 "with tag %x context %x, abort status %x, "
7101 "abort code %x\n",
7102 abort_iocb, abort_iotag, abort_context,
7103 irsp->ulpStatus, irsp->un.ulpWord[4]);
7104
7105 /* 7149 /*
7106 * If the iocb is not found in Firmware queue the iocb 7150 * If the iocb is not found in Firmware queue the iocb
7107 * might have completed already. Do not free it again. 7151 * might have completed already. Do not free it again.
@@ -7120,6 +7164,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7120 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4) 7164 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
7121 abort_context = abort_iocb->iocb.ulpContext; 7165 abort_context = abort_iocb->iocb.ulpContext;
7122 } 7166 }
7167
7168 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
7169 "0327 Cannot abort els iocb %p "
7170 "with tag %x context %x, abort status %x, "
7171 "abort code %x\n",
7172 abort_iocb, abort_iotag, abort_context,
7173 irsp->ulpStatus, irsp->un.ulpWord[4]);
7123 /* 7174 /*
7124 * make sure we have the right iocbq before taking it 7175 * make sure we have the right iocbq before taking it
7125 * off the txcmplq and try to call completion routine. 7176 * off the txcmplq and try to call completion routine.
@@ -7137,7 +7188,10 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7137 * following abort XRI from the HBA. 7188 * following abort XRI from the HBA.
7138 */ 7189 */
7139 list_del_init(&abort_iocb->list); 7190 list_del_init(&abort_iocb->list);
7140 pring->txcmplq_cnt--; 7191 if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
7192 abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
7193 pring->txcmplq_cnt--;
7194 }
7141 7195
7142 /* Firmware could still be in progress of DMAing 7196 /* Firmware could still be in progress of DMAing
7143 * payload, so don't free data buffer till after 7197 * payload, so don't free data buffer till after
@@ -7269,8 +7323,9 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7269 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 7323 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
7270 "0339 Abort xri x%x, original iotag x%x, " 7324 "0339 Abort xri x%x, original iotag x%x, "
7271 "abort cmd iotag x%x\n", 7325 "abort cmd iotag x%x\n",
7326 iabt->un.acxri.abortIoTag,
7272 iabt->un.acxri.abortContextTag, 7327 iabt->un.acxri.abortContextTag,
7273 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 7328 abtsiocbp->iotag);
7274 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); 7329 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
7275 7330
7276 if (retval) 7331 if (retval)
@@ -7600,7 +7655,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7600 long timeleft, timeout_req = 0; 7655 long timeleft, timeout_req = 0;
7601 int retval = IOCB_SUCCESS; 7656 int retval = IOCB_SUCCESS;
7602 uint32_t creg_val; 7657 uint32_t creg_val;
7603 7658 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
7604 /* 7659 /*
7605 * If the caller has provided a response iocbq buffer, then context2 7660 * If the caller has provided a response iocbq buffer, then context2
7606 * is NULL or its an error. 7661 * is NULL or its an error.
@@ -7622,7 +7677,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7622 readl(phba->HCregaddr); /* flush */ 7677 readl(phba->HCregaddr); /* flush */
7623 } 7678 }
7624 7679
7625 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0); 7680 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
7681 SLI_IOCB_RET_IOCB);
7626 if (retval == IOCB_SUCCESS) { 7682 if (retval == IOCB_SUCCESS) {
7627 timeout_req = timeout * HZ; 7683 timeout_req = timeout * HZ;
7628 timeleft = wait_event_timeout(done_q, 7684 timeleft = wait_event_timeout(done_q,
@@ -7644,6 +7700,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7644 timeout, (timeleft / jiffies)); 7700 timeout, (timeleft / jiffies));
7645 retval = IOCB_TIMEDOUT; 7701 retval = IOCB_TIMEDOUT;
7646 } 7702 }
7703 } else if (retval == IOCB_BUSY) {
7704 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7705 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
7706 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
7707 return retval;
7647 } else { 7708 } else {
7648 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7709 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7649 "0332 IOCB wait issue failed, Data x%x\n", 7710 "0332 IOCB wait issue failed, Data x%x\n",
@@ -7724,9 +7785,10 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
7724 * if LPFC_MBX_WAKE flag is set the mailbox is completed 7785 * if LPFC_MBX_WAKE flag is set the mailbox is completed
7725 * else do not free the resources. 7786 * else do not free the resources.
7726 */ 7787 */
7727 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) 7788 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
7728 retval = MBX_SUCCESS; 7789 retval = MBX_SUCCESS;
7729 else { 7790 lpfc_sli4_swap_str(phba, pmboxq);
7791 } else {
7730 retval = MBX_TIMEOUT; 7792 retval = MBX_TIMEOUT;
7731 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7793 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7732 } 7794 }
@@ -8789,12 +8851,17 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8789{ 8851{
8790 struct lpfc_iocbq *irspiocbq; 8852 struct lpfc_iocbq *irspiocbq;
8791 unsigned long iflags; 8853 unsigned long iflags;
8854 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8792 8855
8793 /* Get an irspiocbq for later ELS response processing use */ 8856 /* Get an irspiocbq for later ELS response processing use */
8794 irspiocbq = lpfc_sli_get_iocbq(phba); 8857 irspiocbq = lpfc_sli_get_iocbq(phba);
8795 if (!irspiocbq) { 8858 if (!irspiocbq) {
8796 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8859 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8797 "0387 Failed to allocate an iocbq\n"); 8860 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
8861 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
8862 pring->txq_cnt, phba->iocb_cnt,
8863 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
8864 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
8798 return false; 8865 return false;
8799 } 8866 }
8800 8867
@@ -9043,9 +9110,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
9043 } 9110 }
9044 } 9111 }
9045 if (unlikely(!cq)) { 9112 if (unlikely(!cq)) {
9046 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9113 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
9047 "0365 Slow-path CQ identifier (%d) does " 9114 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9048 "not exist\n", cqid); 9115 "0365 Slow-path CQ identifier "
9116 "(%d) does not exist\n", cqid);
9049 return; 9117 return;
9050 } 9118 }
9051 9119
@@ -9208,6 +9276,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
9208{ 9276{
9209 struct lpfc_wcqe_release wcqe; 9277 struct lpfc_wcqe_release wcqe;
9210 bool workposted = false; 9278 bool workposted = false;
9279 unsigned long iflag;
9211 9280
9212 /* Copy the work queue CQE and convert endian order if needed */ 9281 /* Copy the work queue CQE and convert endian order if needed */
9213 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 9282 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
@@ -9216,6 +9285,9 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
9216 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 9285 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
9217 case CQE_CODE_COMPL_WQE: 9286 case CQE_CODE_COMPL_WQE:
9218 /* Process the WQ complete event */ 9287 /* Process the WQ complete event */
9288 spin_lock_irqsave(&phba->hbalock, iflag);
9289 phba->last_completion_time = jiffies;
9290 spin_unlock_irqrestore(&phba->hbalock, iflag);
9219 lpfc_sli4_fp_handle_fcp_wcqe(phba, 9291 lpfc_sli4_fp_handle_fcp_wcqe(phba,
9220 (struct lpfc_wcqe_complete *)&wcqe); 9292 (struct lpfc_wcqe_complete *)&wcqe);
9221 break; 9293 break;
@@ -9271,9 +9343,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9271 9343
9272 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 9344 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
9273 if (unlikely(!cq)) { 9345 if (unlikely(!cq)) {
9274 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9346 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
9275 "0367 Fast-path completion queue does not " 9347 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9276 "exist\n"); 9348 "0367 Fast-path completion queue "
9349 "does not exist\n");
9277 return; 9350 return;
9278 } 9351 }
9279 9352
@@ -11898,12 +11971,26 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
11898 * available rpis maintained by the driver. 11971 * available rpis maintained by the driver.
11899 **/ 11972 **/
11900void 11973void
11974__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11975{
11976 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
11977 phba->sli4_hba.rpi_count--;
11978 phba->sli4_hba.max_cfg_param.rpi_used--;
11979 }
11980}
11981
11982/**
11983 * lpfc_sli4_free_rpi - Release an rpi for reuse.
11984 * @phba: pointer to lpfc hba data structure.
11985 *
11986 * This routine is invoked to release an rpi to the pool of
11987 * available rpis maintained by the driver.
11988 **/
11989void
11901lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 11990lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11902{ 11991{
11903 spin_lock_irq(&phba->hbalock); 11992 spin_lock_irq(&phba->hbalock);
11904 clear_bit(rpi, phba->sli4_hba.rpi_bmask); 11993 __lpfc_sli4_free_rpi(phba, rpi);
11905 phba->sli4_hba.rpi_count--;
11906 phba->sli4_hba.max_cfg_param.rpi_used--;
11907 spin_unlock_irq(&phba->hbalock); 11994 spin_unlock_irq(&phba->hbalock);
11908} 11995}
11909 11996
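The rpi release is split the usual kernel way: a __-prefixed helper that assumes the caller already holds hbalock, plus a thin locking wrapper, which lets lpfc_cleanup_pending_mbox() below free an rpi while it already holds the lock. test_and_clear_bit() also makes a double free harmless, since the counters only move on the first clear. A generic sketch of the convention:

#include <linux/spinlock.h>
#include <linux/bitops.h>

struct sketch_pool {			/* illustrative resource pool */
	spinlock_t lock;
	unsigned long *bmask;
	int count;
};

/* Caller must hold pool->lock. */
static void __sketch_free(struct sketch_pool *pool, int id)
{
	if (test_and_clear_bit(id, pool->bmask))
		pool->count--;		/* only on the first release */
}

static void sketch_free(struct sketch_pool *pool, int id)
{
	spin_lock_irq(&pool->lock);
	__sketch_free(pool, id);
	spin_unlock_irq(&pool->lock);
}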
@@ -12318,18 +12405,47 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
12318{ 12405{
12319 uint16_t next_fcf_index; 12406 uint16_t next_fcf_index;
12320 12407
12321 /* Search from the currently registered FCF index */ 12408 /* Start the search from the bit after the currently registered FCF index */
12409 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
12410 LPFC_SLI4_FCF_TBL_INDX_MAX;
12322 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12411 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12323 LPFC_SLI4_FCF_TBL_INDX_MAX, 12412 LPFC_SLI4_FCF_TBL_INDX_MAX,
12324 phba->fcf.current_rec.fcf_indx); 12413 next_fcf_index);
12414
12325 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 12415 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
12326 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 12416 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
12327 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12417 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12328 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 12418 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
12329 /* Round robin failover stop condition */ 12419
12330 if (next_fcf_index == phba->fcf.fcf_rr_init_indx) 12420 /* Check roundrobin failover list empty condition */
12421 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12422 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
12423 "2844 No roundrobin failover FCF available\n");
12331 return LPFC_FCOE_FCF_NEXT_NONE; 12424 return LPFC_FCOE_FCF_NEXT_NONE;
12425 }
12426
12427 /* Check roundrobin failover index bmask stop condition */
12428 if (next_fcf_index == phba->fcf.fcf_rr_init_indx) {
12429 if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) {
12430 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
12431 "2847 Round robin failover FCF index "
12432 "search hit stop condition:x%x\n",
12433 next_fcf_index);
12434 return LPFC_FCOE_FCF_NEXT_NONE;
12435 }
12436 /* The roundrobin failover index bmask was updated; start over */
12437 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12438 "2848 Round robin failover FCF index bmask "
12439 "updated, start over\n");
12440 spin_lock_irq(&phba->hbalock);
12441 phba->fcf.fcf_flag &= ~FCF_REDISC_RRU;
12442 spin_unlock_irq(&phba->hbalock);
12443 return phba->fcf.fcf_rr_init_indx;
12444 }
12332 12445
12446 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12447 "2845 Get next round robin failover "
12448 "FCF index x%x\n", next_fcf_index);
12333 return next_fcf_index; 12449 return next_fcf_index;
12334} 12450}
12335 12451
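The next-index search now begins one bit past the currently registered FCF index, wraps by re-scanning from zero, and only then applies the empty and stop-condition checks, with the new FCF_REDISC_RRU flag letting a freshly updated bitmap restart the scan instead of terminating it. The wraparound itself, as a standalone sketch with an illustrative table size:

#include <linux/bitops.h>

#define SKETCH_TBL_MAX 64		/* illustrative table size */

/* Round-robin: next set bit strictly after cur, wrapping to 0. */
static int rr_next(const unsigned long *bmask, unsigned int cur)
{
	unsigned long next;

	next = find_next_bit(bmask, SKETCH_TBL_MAX,
			     (cur + 1) % SKETCH_TBL_MAX);
	if (next >= SKETCH_TBL_MAX)	/* ran off the end: wrap */
		next = find_next_bit(bmask, SKETCH_TBL_MAX, 0);
	if (next >= SKETCH_TBL_MAX)	/* bitmap is empty */
		return -1;
	return next;
}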
@@ -12359,11 +12475,20 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
12359 /* Set the eligible FCF record index bmask */ 12475 /* Set the eligible FCF record index bmask */
12360 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12476 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12361 12477
12478 /* Mark the roundrobin index bmask as updated */
12479 spin_lock_irq(&phba->hbalock);
12480 phba->fcf.fcf_flag |= FCF_REDISC_RRU;
12481 spin_unlock_irq(&phba->hbalock);
12482
12483 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12484 "2790 Set FCF index x%x to round robin failover "
12485 "bmask\n", fcf_index);
12486
12362 return 0; 12487 return 0;
12363} 12488}
12364 12489
12365/** 12490/**
12366 * lpfc_sli4_fcf_rr_index_set - Clear bmask from eligible fcf record index 12491 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
12367 * @phba: pointer to lpfc hba data structure. 12492 * @phba: pointer to lpfc hba data structure.
12368 * 12493 *
12369 * This routine clears the FCF record index from the eligible bmask for 12494 * This routine clears the FCF record index from the eligible bmask for
@@ -12384,6 +12509,10 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
12384 } 12509 }
12385 /* Clear the eligible FCF record index bmask */ 12510 /* Clear the eligible FCF record index bmask */
12386 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12511 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12512
12513 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12514 "2791 Clear FCF index x%x from round robin failover "
12515 "bmask\n", fcf_index);
12387} 12516}
12388 12517
12389/** 12518/**
@@ -12446,7 +12575,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
12446} 12575}
12447 12576
12448/** 12577/**
12449 * lpfc_sli4_redisc_all_fcf - Request to rediscover entire FCF table by port. 12578 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
12450 * @phba: pointer to lpfc hba data structure. 12579 * @phba: pointer to lpfc hba data structure.
12451 * 12580 *
12452 * This routine is invoked to request for rediscovery of the entire FCF table 12581 * This routine is invoked to request for rediscovery of the entire FCF table
@@ -12662,6 +12791,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12662 LPFC_MBOXQ_t *mb, *nextmb; 12791 LPFC_MBOXQ_t *mb, *nextmb;
12663 struct lpfc_dmabuf *mp; 12792 struct lpfc_dmabuf *mp;
12664 struct lpfc_nodelist *ndlp; 12793 struct lpfc_nodelist *ndlp;
12794 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
12665 12795
12666 spin_lock_irq(&phba->hbalock); 12796 spin_lock_irq(&phba->hbalock);
12667 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 12797 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12673,6 +12803,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12673 continue; 12803 continue;
12674 12804
12675 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12805 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12806 if (phba->sli_rev == LPFC_SLI_REV4)
12807 __lpfc_sli4_free_rpi(phba,
12808 mb->u.mb.un.varRegLogin.rpi);
12676 mp = (struct lpfc_dmabuf *) (mb->context1); 12809 mp = (struct lpfc_dmabuf *) (mb->context1);
12677 if (mp) { 12810 if (mp) {
12678 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 12811 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -12680,6 +12813,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12680 } 12813 }
12681 ndlp = (struct lpfc_nodelist *) mb->context2; 12814 ndlp = (struct lpfc_nodelist *) mb->context2;
12682 if (ndlp) { 12815 if (ndlp) {
12816 spin_lock_irq(shost->host_lock);
12817 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
12818 spin_unlock_irq(shost->host_lock);
12683 lpfc_nlp_put(ndlp); 12819 lpfc_nlp_put(ndlp);
12684 mb->context2 = NULL; 12820 mb->context2 = NULL;
12685 } 12821 }
@@ -12695,6 +12831,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12695 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12831 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12696 ndlp = (struct lpfc_nodelist *) mb->context2; 12832 ndlp = (struct lpfc_nodelist *) mb->context2;
12697 if (ndlp) { 12833 if (ndlp) {
12834 spin_lock_irq(shost->host_lock);
12835 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
12836 spin_unlock_irq(shost->host_lock);
12698 lpfc_nlp_put(ndlp); 12837 lpfc_nlp_put(ndlp);
12699 mb->context2 = NULL; 12838 mb->context2 = NULL;
12700 } 12839 }
@@ -12705,3 +12844,85 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12705 spin_unlock_irq(&phba->hbalock); 12844 spin_unlock_irq(&phba->hbalock);
12706} 12845}
12707 12846
12847/**
12848 * lpfc_drain_txq - Drain the txq
12849 * @phba: Pointer to HBA context object.
12850 *
12851 * This function attempts to submit IOCBs on the txq
12852 * to the adapter. For SLI4 adapters, the txq contains
12853 * ELS IOCBs that have been deferred because there
12854 * are no SGLs. This congestion can occur with large
12855 * vport counts during node discovery.
12856 **/
12857
12858uint32_t
12859lpfc_drain_txq(struct lpfc_hba *phba)
12860{
12861 LIST_HEAD(completions);
12862 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
12863 struct lpfc_iocbq *piocbq = NULL;
12864 unsigned long iflags = 0;
12865 char *fail_msg = NULL;
12866 struct lpfc_sglq *sglq;
12867 union lpfc_wqe wqe;
12868
12869 spin_lock_irqsave(&phba->hbalock, iflags);
12870 if (pring->txq_cnt > pring->txq_max)
12871 pring->txq_max = pring->txq_cnt;
12872
12873 spin_unlock_irqrestore(&phba->hbalock, iflags);
12874
12875 while (pring->txq_cnt) {
12876 spin_lock_irqsave(&phba->hbalock, iflags);
12877
12878 sglq = __lpfc_sli_get_sglq(phba);
12879 if (!sglq) {
12880 spin_unlock_irqrestore(&phba->hbalock, iflags);
12881 break;
12882 } else {
12883 piocbq = lpfc_sli_ringtx_get(phba, pring);
12884 if (!piocbq) {
12885 /* The txq_cnt is out of sync. This should
12886 * never happen.
12887 */
12888 sglq = __lpfc_clear_active_sglq(phba,
12889 sglq->sli4_xritag);
12890 spin_unlock_irqrestore(&phba->hbalock, iflags);
12891 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12892 "2823 txq empty and txq_cnt is %d\n ",
12893 pring->txq_cnt);
12894 break;
12895 }
12896 }
12897
12898 /* The xri and iocb resources are secured;
12899 * attempt to issue the request.
12900 */
12901 piocbq->sli4_xritag = sglq->sli4_xritag;
12902 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
12903 fail_msg = "to convert bpl to sgl";
12904 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
12905 fail_msg = "to convert iocb to wqe";
12906 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
12907 fail_msg = " - Wq is full";
12908 else
12909 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
12910
12911 if (fail_msg) {
12912 /* Failed means we can't issue and need to cancel */
12913 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12914 "2822 IOCB failed %s iotag 0x%x "
12915 "xri 0x%x\n",
12916 fail_msg,
12917 piocbq->iotag, piocbq->sli4_xritag);
12918 list_add_tail(&piocbq->list, &completions);
12919 }
12920 spin_unlock_irqrestore(&phba->hbalock, iflags);
12921 }
12922
12923 /* Cancel all the IOCBs that cannot be issued */
12924 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12925 IOERR_SLI_ABORTED);
12926
12927 return pring->txq_cnt;
12928}
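lpfc_drain_txq() returns the number of iocbs still deferred, so callers can tell whether another pass is needed. Presumably it is driven from the worker thread via the new WORKER_SERVICE_TXQ event; a minimal caller sketch using only names introduced by this patch:

/* Hedged sketch of a worker-side service call. */
static void sketch_service_txq(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (pring->txq_cnt)
		lpfc_drain_txq(phba);	/* returns count still queued */
}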
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index e3792151ca06..cd56d6cce6c3 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -48,6 +48,7 @@ struct lpfc_iocbq {
48 /* lpfc_iocbqs are used in double linked lists */ 48 /* lpfc_iocbqs are used in double linked lists */
49 struct list_head list; 49 struct list_head list;
50 struct list_head clist; 50 struct list_head clist;
51 struct list_head dlist;
51 uint16_t iotag; /* pre-assigned IO tag */ 52 uint16_t iotag; /* pre-assigned IO tag */
52 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 53 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
53 struct lpfc_cq_event cq_event; 54 struct lpfc_cq_event cq_event;
@@ -64,6 +65,7 @@ struct lpfc_iocbq {
64#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ 65#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
65#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ 66#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
66#define DSS_SECURITY_OP 0x100 /* security IO */ 67#define DSS_SECURITY_OP 0x100 /* security IO */
68#define LPFC_IO_ON_Q 0x200 /* The IO is still on the TXCMPLQ */
67 69
68#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 70#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
69#define LPFC_FIP_ELS_ID_SHIFT 14 71#define LPFC_FIP_ELS_ID_SHIFT 14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 58bb4c81b54e..a3b24d99a2a7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -51,6 +51,9 @@
51#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF 51#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
52#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF 52#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
53 53
54#define LPFC_FCOE_NULL_VID 0xFFF
55#define LPFC_FCOE_IGNORE_VID 0xFFFF
56
54/* First 3 bytes of default FCF MAC is specified by FC_MAP */ 57/* First 3 bytes of default FCF MAC is specified by FC_MAP */
55#define LPFC_FCOE_FCF_MAC3 0xFF 58#define LPFC_FCOE_FCF_MAC3 0xFF
56#define LPFC_FCOE_FCF_MAC4 0xFF 59#define LPFC_FCOE_FCF_MAC4 0xFF
@@ -58,7 +61,7 @@
58#define LPFC_FCOE_FCF_MAP0 0x0E 61#define LPFC_FCOE_FCF_MAP0 0x0E
59#define LPFC_FCOE_FCF_MAP1 0xFC 62#define LPFC_FCOE_FCF_MAP1 0xFC
60#define LPFC_FCOE_FCF_MAP2 0x00 63#define LPFC_FCOE_FCF_MAP2 0x00
61#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC 64#define LPFC_FCOE_MAX_RCV_SIZE 0x800
62#define LPFC_FCOE_FKA_ADV_PER 0 65#define LPFC_FCOE_FKA_ADV_PER 0
63#define LPFC_FCOE_FIP_PRIORITY 0x80 66#define LPFC_FCOE_FIP_PRIORITY 0x80
64 67
@@ -160,6 +163,7 @@ struct lpfc_fcf {
160#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ 163#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
161#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ 164#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
162#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ 165#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
166#define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */
163 uint32_t addr_mode; 167 uint32_t addr_mode;
164 uint16_t fcf_rr_init_indx; 168 uint16_t fcf_rr_init_indx;
165 uint32_t eligible_fcf_cnt; 169 uint32_t eligible_fcf_cnt;
@@ -382,6 +386,7 @@ struct lpfc_sli4_hba {
382 struct lpfc_pc_sli4_params pc_sli4_params; 386 struct lpfc_pc_sli4_params pc_sli4_params;
383 struct msix_entry *msix_entries; 387 struct msix_entry *msix_entries;
384 uint32_t cfg_eqn; 388 uint32_t cfg_eqn;
389 uint32_t msix_vec_nr;
385 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 390 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
386 /* Pointers to the constructed SLI4 queues */ 391 /* Pointers to the constructed SLI4 queues */
387 struct lpfc_queue **fp_eq; /* Fast-path event queue */ 392 struct lpfc_queue **fp_eq; /* Fast-path event queue */
@@ -524,6 +529,7 @@ int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
524struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); 529struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
525void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); 530void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
526int lpfc_sli4_alloc_rpi(struct lpfc_hba *); 531int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
532void __lpfc_sli4_free_rpi(struct lpfc_hba *, int);
527void lpfc_sli4_free_rpi(struct lpfc_hba *, int); 533void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
528void lpfc_sli4_remove_rpis(struct lpfc_hba *); 534void lpfc_sli4_remove_rpis(struct lpfc_hba *);
529void lpfc_sli4_async_event_proc(struct lpfc_hba *); 535void lpfc_sli4_async_event_proc(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 5294c3a515a1..d28830af71d8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.12" 21#define LPFC_DRIVER_VERSION "8.3.15"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index ab91359bde20..1655507a682c 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -782,7 +782,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
782 int i; 782 int i;
783 if (vports == NULL) 783 if (vports == NULL)
784 return; 784 return;
785 for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++) 785 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
786 scsi_host_put(lpfc_shost_from_vport(vports[i])); 786 scsi_host_put(lpfc_shost_from_vport(vports[i]));
787 kfree(vports); 787 kfree(vports);
788} 788}