author	Jeff Garzik <jeff@garzik.org>	2012-07-25 15:58:48 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2012-07-25 15:58:48 -0400
commit	8407884dd9164ec18ed2afc00f56b87e36c51fcf (patch)
tree	b6ea42c231d7d39f454de28a068d78ce11709770 /drivers/scsi
parent	dc7f71f486f4f5fa96f6dcf86833da020cde8a11 (diff)
parent	bdc0077af574800d24318b6945cf2344e8dbb050 (diff)
Merge branch 'master' [vanilla Linus master] into libata-dev.git/upstream
Two bits were appended to the end of the bitfield list in struct
scsi_device.  Resolve that conflict by including both bits.

Conflicts:
	include/scsi/scsi_device.h
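As an illustration of the conflict shape, a minimal C sketch (the field names below are hypothetical placeholders, not the actual bits in include/scsi/scsi_device.h):

	struct scsi_device {
		/* ... existing members ... */
		unsigned some_existing_flag:1;
		/* one parent appended this bit at the end of the list */
		unsigned bit_from_branch_a:1;
		/* the other parent appended this one at the same position */
		unsigned bit_from_branch_b:1;
	};

Because both parents added single-bit flags at the same tail position, keeping both lines is the whole resolution; nothing outside the struct needs to change.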
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 19
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 237
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 79
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 2
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 54
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 31
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 6
-rw-r--r--  drivers/scsi/aacraid/linit.c | 16
-rw-r--r--  drivers/scsi/aacraid/nark.c | 4
-rw-r--r--  drivers/scsi/aacraid/rkt.c | 2
-rw-r--r--  drivers/scsi/aacraid/rx.c | 4
-rw-r--r--  drivers/scsi/aacraid/sa.c | 4
-rw-r--r--  drivers/scsi/aacraid/src.c | 96
-rw-r--r--  drivers/scsi/aha152x.c | 4
-rw-r--r--  drivers/scsi/aha1542.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 4
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c | 4
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 5
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 12
-rw-r--r--  drivers/scsi/bnx2fc/Makefile | 3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 13
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_debug.c | 70
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_debug.h | 73
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 100
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 4
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 25
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c | 40
-rw-r--r--  drivers/scsi/bnx2i/57xx_iscsi_hsi.h | 16
-rw-r--r--  drivers/scsi/bnx2i/bnx2i.h | 59
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 38
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 40
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 21
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 3
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 9
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 12
-rw-r--r--  drivers/scsi/device_handler/scsi_dh.c | 38
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 71
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 36
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 13
-rw-r--r--  drivers/scsi/fcoe/fcoe_sysfs.c | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 12
-rw-r--r--  drivers/scsi/hosts.c | 9
-rw-r--r--  drivers/scsi/hptiop.c | 10
-rw-r--r--  drivers/scsi/hptiop.h | 1
-rw-r--r--  drivers/scsi/isci/init.c | 3
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 130
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 22
-rw-r--r--  drivers/scsi/libfc/fc_frame.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 69
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 53
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 23
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 12
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 74
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 39
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 195
-rw-r--r--  drivers/scsi/lpfc/Makefile | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 93
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 45
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 233
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 32
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 131
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid.c | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 3
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 4
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 21
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 37
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 51
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 55
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 156
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/scsi.c | 4
-rw-r--r--  drivers/scsi/scsi_error.c | 18
-rw-r--r--  drivers/scsi/scsi_lib.c | 104
-rw-r--r--  drivers/scsi/scsi_netlink.c | 7
-rw-r--r--  drivers/scsi/scsi_pm.c | 23
-rw-r--r--  drivers/scsi/scsi_priv.h | 10
-rw-r--r--  drivers/scsi/scsi_scan.c | 34
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 56
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 34
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 15
-rw-r--r--  drivers/scsi/scsi_wait_scan.c | 42
-rw-r--r--  drivers/scsi/sd.c | 10
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 35
-rw-r--r--  drivers/scsi/virtio_scsi.c | 337
103 files changed, 2527 insertions(+), 1060 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e9559782d3ec..74bf1aa7af46 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,23 +263,6 @@ config SCSI_SCAN_ASYNC
	  You can override this choice by specifying "scsi_mod.scan=sync"
	  or async on the kernel's command line.
 
-config SCSI_WAIT_SCAN
-	tristate  # No prompt here, this is an invisible symbol.
-	default m
-	depends on SCSI
-	depends on MODULES
-# scsi_wait_scan is a loadable module which waits until all the async scans are
-# complete.  The idea is to use it in initrd/initramfs scripts.  You modprobe
-# it after all the modprobes of the root SCSI drivers and it will wait until
-# they have all finished scanning their buses before allowing the boot to
-# proceed.  (This method is not applicable if targets boot independently in
-# parallel with the initiator, or with transports with non-deterministic target
-# discovery schemes, or if a transport driver does not support scsi_wait_scan.)
-#
-# This symbol is not exposed as a prompt because little is to be gained by
-# disabling it, whereas people who accidentally switch it off may wonder why
-# their mkinitrd gets into trouble.
-
 menu "SCSI Transports"
 	depends on SCSI
 
@@ -461,7 +444,7 @@ config SCSI_ACARD
 
 config SCSI_AHA152X
 	tristate "Adaptec AHA152X/2825 support"
-	depends on ISA && SCSI && !64BIT
+	depends on ISA && SCSI
 	select SCSI_SPI_ATTRS
 	select CHECK_SIGNATURE
 	---help---
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1a3368b08615..888f73a4aae1 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -159,8 +159,6 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
 # This goes last, so that "real" scsi devices probe earlier
 obj-$(CONFIG_SCSI_DEBUG)	+= scsi_debug.o
 
-obj-$(CONFIG_SCSI_WAIT_SCAN)	+= scsi_wait_scan.o
-
 scsi_mod-y			+= scsi.o hosts.o scsi_ioctl.o constants.o \
 				   scsicam.o scsi_error.o scsi_lib.o
 scsi_mod-$(CONFIG_SCSI_DMA)	+= scsi_lib_dma.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 52551662d107..d79457ac8bef 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -135,6 +135,8 @@ struct inquiry_data {
 static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
 static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
 static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
+static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max);
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new);
 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
 #ifdef AAC_DETAILED_STATUS_INFO
 static char *aac_get_status_string(u32 status);
@@ -152,10 +154,14 @@ int aac_commit = -1;
 int startup_timeout = 180;
 int aif_timeout = 120;
 int aac_sync_mode;		/* Only Sync. transfer - disabled */
+int aac_convert_sgl = 1;	/* convert non-conformable s/g list - enabled */
 
 module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
	" 0=off, 1=on");
+module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
+	" 0=off, 1=on");
 module_param(nondasd, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
	" 0=off, 1=on");
@@ -963,25 +969,44 @@ static void io_callback(void *context, struct fib * fibptr);
 
 static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
 {
-	u16 fibsize;
-	struct aac_raw_io *readcmd;
+	struct aac_dev *dev = fib->dev;
+	u16 fibsize, command;
+
 	aac_fib_init(fib);
-	readcmd = (struct aac_raw_io *) fib_data(fib);
-	readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
-	readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
-	readcmd->count = cpu_to_le32(count<<9);
-	readcmd->cid = cpu_to_le16(scmd_id(cmd));
-	readcmd->flags = cpu_to_le16(IO_TYPE_READ);
-	readcmd->bpTotal = 0;
-	readcmd->bpComplete = 0;
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+		struct aac_raw_io2 *readcmd2;
+		readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
+		memset(readcmd2, 0, sizeof(struct aac_raw_io2));
+		readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
+		readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+		readcmd2->byteCount = cpu_to_le32(count<<9);
+		readcmd2->cid = cpu_to_le16(scmd_id(cmd));
+		readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
+		aac_build_sgraw2(cmd, readcmd2, dev->scsi_host_ptr->sg_tablesize);
+		command = ContainerRawIo2;
+		fibsize = sizeof(struct aac_raw_io2) +
+			((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+	} else {
+		struct aac_raw_io *readcmd;
+		readcmd = (struct aac_raw_io *) fib_data(fib);
+		readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+		readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+		readcmd->count = cpu_to_le32(count<<9);
+		readcmd->cid = cpu_to_le16(scmd_id(cmd));
+		readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
+		readcmd->bpTotal = 0;
+		readcmd->bpComplete = 0;
+		aac_build_sgraw(cmd, &readcmd->sg);
+		command = ContainerRawIo;
+		fibsize = sizeof(struct aac_raw_io) +
+			((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
+	}
 
-	aac_build_sgraw(cmd, &readcmd->sg);
-	fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
 	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
 	/*
 	 *	Now send the Fib to the adapter
 	 */
-	return aac_fib_send(ContainerRawIo,
+	return aac_fib_send(command,
 			  fib,
 			  fibsize,
 			  FsaNormal,
@@ -1052,28 +1077,50 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
 
 static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
 {
-	u16 fibsize;
-	struct aac_raw_io *writecmd;
+	struct aac_dev *dev = fib->dev;
+	u16 fibsize, command;
+
 	aac_fib_init(fib);
-	writecmd = (struct aac_raw_io *) fib_data(fib);
-	writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
-	writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
-	writecmd->count = cpu_to_le32(count<<9);
-	writecmd->cid = cpu_to_le16(scmd_id(cmd));
-	writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
-	  (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
-		cpu_to_le16(IO_TYPE_WRITE|IO_SUREWRITE) :
-		cpu_to_le16(IO_TYPE_WRITE);
-	writecmd->bpTotal = 0;
-	writecmd->bpComplete = 0;
-
-	aac_build_sgraw(cmd, &writecmd->sg);
-	fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+		struct aac_raw_io2 *writecmd2;
+		writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
+		memset(writecmd2, 0, sizeof(struct aac_raw_io2));
+		writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
+		writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+		writecmd2->byteCount = cpu_to_le32(count<<9);
+		writecmd2->cid = cpu_to_le16(scmd_id(cmd));
+		writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
+		  (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
+			cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
+			cpu_to_le16(RIO2_IO_TYPE_WRITE);
+		aac_build_sgraw2(cmd, writecmd2, dev->scsi_host_ptr->sg_tablesize);
+		command = ContainerRawIo2;
+		fibsize = sizeof(struct aac_raw_io2) +
+			((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+	} else {
+		struct aac_raw_io *writecmd;
+		writecmd = (struct aac_raw_io *) fib_data(fib);
+		writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+		writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+		writecmd->count = cpu_to_le32(count<<9);
+		writecmd->cid = cpu_to_le16(scmd_id(cmd));
+		writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
+		  (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
+			cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
+			cpu_to_le16(RIO_TYPE_WRITE);
+		writecmd->bpTotal = 0;
+		writecmd->bpComplete = 0;
+		aac_build_sgraw(cmd, &writecmd->sg);
+		command = ContainerRawIo;
+		fibsize = sizeof(struct aac_raw_io) +
+			((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
+	}
 
 	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
 	/*
 	 *	Now send the Fib to the adapter
 	 */
-	return aac_fib_send(ContainerRawIo,
+	return aac_fib_send(command,
 			  fib,
 			  fibsize,
 			  FsaNormal,
@@ -1492,8 +1539,6 @@ int aac_get_adapter_info(struct aac_dev* dev)
 			dev->a_ops.adapter_write = aac_write_block;
 		}
 		dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
-		if (dev->adapter_info.options & AAC_OPT_NEW_COMM_TYPE1)
-			dev->adapter_info.options |= AAC_OPT_NEW_COMM;
 		if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
 			/*
 			 * Worst case size that could cause sg overflow when
@@ -2616,12 +2661,18 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 	srbreply = (struct aac_srb_reply *) fib_data(fibptr);
 
 	scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
-	/*
-	 *	Calculate resid for sg
-	 */
 
-	scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
-		   - le32_to_cpu(srbreply->data_xfer_length));
+	if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+		/* fast response */
+		srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
+		srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
+	} else {
+		/*
+		 *	Calculate resid for sg
+		 */
+		scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
+			   - le32_to_cpu(srbreply->data_xfer_length));
+	}
 
 	scsi_dma_unmap(scsicmd);
 
@@ -2954,6 +3005,118 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
 	return byte_count;
 }
 
+static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max)
+{
+	unsigned long byte_count = 0;
+	int nseg;
+
+	nseg = scsi_dma_map(scsicmd);
+	BUG_ON(nseg < 0);
+	if (nseg) {
+		struct scatterlist *sg;
+		int i, conformable = 0;
+		u32 min_size = PAGE_SIZE, cur_size;
+
+		scsi_for_each_sg(scsicmd, sg, nseg, i) {
+			int count = sg_dma_len(sg);
+			u64 addr = sg_dma_address(sg);
+
+			BUG_ON(i >= sg_max);
+			rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
+			rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
+			cur_size = cpu_to_le32(count);
+			rio2->sge[i].length = cur_size;
+			rio2->sge[i].flags = 0;
+			if (i == 0) {
+				conformable = 1;
+				rio2->sgeFirstSize = cur_size;
+			} else if (i == 1) {
+				rio2->sgeNominalSize = cur_size;
+				min_size = cur_size;
+			} else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
+				conformable = 0;
+				if (cur_size < min_size)
+					min_size = cur_size;
+			}
+			byte_count += count;
+		}
+
+		/* hba wants the size to be exact */
+		if (byte_count > scsi_bufflen(scsicmd)) {
+			u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
+				(byte_count - scsi_bufflen(scsicmd));
+			rio2->sge[i-1].length = cpu_to_le32(temp);
+			byte_count = scsi_bufflen(scsicmd);
+		}
+
+		rio2->sgeCnt = cpu_to_le32(nseg);
+		rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
+		/* not conformable: evaluate required sg elements */
+		if (!conformable) {
+			int j, nseg_new = nseg, err_found;
+			for (i = min_size / PAGE_SIZE; i >= 1; --i) {
+				err_found = 0;
+				nseg_new = 2;
+				for (j = 1; j < nseg - 1; ++j) {
+					if (rio2->sge[j].length % (i*PAGE_SIZE)) {
+						err_found = 1;
+						break;
+					}
+					nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
+				}
+				if (!err_found)
+					break;
+			}
+			if (i > 0 && nseg_new <= sg_max)
+				aac_convert_sgraw2(rio2, i, nseg, nseg_new);
+		} else
+			rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+
+		/* Check for command underflow */
+		if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+					byte_count, scsicmd->underflow);
+		}
+	}
+
+	return byte_count;
+}
+
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
+{
+	struct sge_ieee1212 *sge;
+	int i, j, pos;
+	u32 addr_low;
+
+	if (aac_convert_sgl == 0)
+		return 0;
+
+	sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC);
+	if (sge == NULL)
+		return -1;
+
+	for (i = 1, pos = 1; i < nseg-1; ++i) {
+		for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
+			addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
+			sge[pos].addrLow = addr_low;
+			sge[pos].addrHigh = rio2->sge[i].addrHigh;
+			if (addr_low < rio2->sge[i].addrLow)
+				sge[pos].addrHigh++;
+			sge[pos].length = pages * PAGE_SIZE;
+			sge[pos].flags = 0;
+			pos++;
+		}
+	}
+	sge[pos] = rio2->sge[nseg-1];
+	memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
+
+	kfree(sge);
+	rio2->sgeCnt = cpu_to_le32(nseg_new);
+	rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+	rio2->sgeNominalSize = pages * PAGE_SIZE;
+	return 0;
+}
+
 #ifdef AAC_DETAILED_STATUS_INFO
 
 struct aac_srb_status_info {
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3fcf62724fad..9e933a88a8bc 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 28900
+# define AAC_DRIVER_BUILD 29800
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -100,6 +100,13 @@ struct user_sgentryraw {
 	u32	flags;		/* reserved for F/W use */
 };
 
+struct sge_ieee1212 {
+	u32	addrLow;
+	u32	addrHigh;
+	u32	length;
+	u32	flags;
+};
+
 /*
  *	SGMAP
  *
@@ -270,6 +277,8 @@ enum aac_queue_types {
  */
 
 #define		FIB_MAGIC	0x0001
+#define		FIB_MAGIC2	0x0004
+#define		FIB_MAGIC2_64	0x0005
 
 /*
  *	Define the priority levels the FSA communication routines support.
@@ -296,22 +305,20 @@ struct aac_fibhdr {
 	__le32 XferState;	/* Current transfer state for this CCB */
 	__le16 Command;		/* Routing information for the destination */
 	u8 StructType;		/* Type FIB */
-	u8 Flags;		/* Flags for FIB */
+	u8 Unused;		/* Unused */
 	__le16 Size;		/* Size of this FIB in bytes */
 	__le16 SenderSize;	/* Size of the FIB in the sender
 				   (for response sizing) */
 	__le32 SenderFibAddress;  /* Host defined data in the FIB */
-	__le32 ReceiverFibAddress;/* Logical address of this FIB for
-				     the adapter */
-	u32 SenderData;		/* Place holder for the sender to store data */
 	union {
-		struct {
-		    __le32 _ReceiverTimeStart;	/* Timestamp for
-						   receipt of fib */
-		    __le32 _ReceiverTimeDone;	/* Timestamp for
-						   completion of fib */
-		} _s;
-	} _u;
+		__le32 ReceiverFibAddress;/* Logical address of this FIB for
+					     the adapter (old) */
+		__le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */
+		__le32 TimeStamp;	/* otherwise timestamp for FW internal use */
+	} u;
+	u32 Handle;		/* FIB handle used for MSGU communication */
+	u32 Previous;		/* FW internal use */
+	u32 Next;		/* FW internal use */
 };
 
 struct hw_fib {
@@ -361,6 +368,7 @@ struct hw_fib {
 #define		ContainerCommand		500
 #define		ContainerCommand64		501
 #define		ContainerRawIo			502
+#define		ContainerRawIo2			503
 /*
  *	Scsi Port commands (scsi passthrough)
  */
@@ -417,6 +425,7 @@ enum fib_xfer_state {
 #define ADAPTER_INIT_STRUCT_REVISION		3
 #define ADAPTER_INIT_STRUCT_REVISION_4		4	// rocket science
 #define ADAPTER_INIT_STRUCT_REVISION_6		6	/* PMC src */
+#define ADAPTER_INIT_STRUCT_REVISION_7		7	/* Denali */
 
 struct aac_init
 {
@@ -441,7 +450,9 @@ struct aac_init
 #define INITFLAGS_NEW_COMM_SUPPORTED		0x00000001
 #define INITFLAGS_DRIVER_USES_UTC_TIME		0x00000010
 #define INITFLAGS_DRIVER_SUPPORTS_PM		0x00000020
-#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED	0x00000041
+#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED	0x00000040
+#define INITFLAGS_FAST_JBOD_SUPPORTED		0x00000080
+#define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED	0x00000100
 	__le32	MaxIoCommands;	/* max outstanding commands */
 	__le32	MaxIoSize;	/* largest I/O command */
 	__le32	MaxFibSize;	/* largest FIB to adapter */
@@ -1052,10 +1063,11 @@ struct aac_dev
 	struct adapter_ops	a_ops;
 	unsigned long		fsrev;		/* Main driver's revision number */
 
-	unsigned long		dbg_base;	/* address of UART
+	resource_size_t		base_start;	/* main IO base */
+	resource_size_t		dbg_base;	/* address of UART
 						 * debug buffer */
 
-	unsigned		base_size, dbg_size;	/* Size of
+	resource_size_t		base_size, dbg_size;	/* Size of
 							 * mapped in region */
 
 	struct aac_init		*init;		/* Holds initialization info to communicate with adapter */
@@ -1123,6 +1135,7 @@ struct aac_dev
 #	define AAC_COMM_PRODUCER	0
 #	define AAC_COMM_MESSAGE		1
 #	define AAC_COMM_MESSAGE_TYPE1	3
+#	define AAC_COMM_MESSAGE_TYPE2	4
 	u8			raw_io_interface;
 	u8			raw_io_64;
 	u8			printf_enabled;
@@ -1181,6 +1194,7 @@ struct aac_dev
 #define FIB_CONTEXT_FLAG_TIMED_OUT		(0x00000001)
 #define FIB_CONTEXT_FLAG			(0x00000002)
 #define FIB_CONTEXT_FLAG_WAIT			(0x00000004)
+#define FIB_CONTEXT_FLAG_FASTRESP		(0x00000008)
 
 /*
  *	Define the command values
@@ -1287,6 +1301,22 @@ struct aac_dev
 #define	CMDATA_SYNCH	4
 #define	CMUNSTABLE	5
 
+#define	RIO_TYPE_WRITE			0x0000
+#define	RIO_TYPE_READ			0x0001
+#define	RIO_SUREWRITE			0x0008
+
+#define RIO2_IO_TYPE			0x0003
+#define RIO2_IO_TYPE_WRITE		0x0000
+#define RIO2_IO_TYPE_READ		0x0001
+#define RIO2_IO_TYPE_VERIFY		0x0002
+#define RIO2_IO_ERROR			0x0004
+#define RIO2_IO_SUREWRITE		0x0008
+#define RIO2_SGL_CONFORMANT		0x0010
+#define RIO2_SG_FORMAT			0xF000
+#define RIO2_SG_FORMAT_ARC		0x0000
+#define RIO2_SG_FORMAT_SRL		0x1000
+#define RIO2_SG_FORMAT_IEEE1212		0x2000
+
 struct aac_read
 {
 	__le32		command;
@@ -1331,9 +1361,6 @@ struct aac_write64
 	__le32		block;
 	__le16		pad;
 	__le16		flags;
-#define	IO_TYPE_WRITE	0x00000000
-#define	IO_TYPE_READ	0x00000001
-#define	IO_SUREWRITE	0x00000008
 	struct sgmap64	sg;	// Must be last in struct because it is variable
 };
 struct aac_write_reply
@@ -1354,6 +1381,22 @@ struct aac_raw_io
 	struct sgmapraw	sg;
 };
 
+struct aac_raw_io2 {
+	__le32		blockLow;
+	__le32		blockHigh;
+	__le32		byteCount;
+	__le16		cid;
+	__le16		flags;		/* RIO2 flags */
+	__le32		sgeFirstSize;	/* size of first sge el. */
+	__le32		sgeNominalSize;	/* size of 2nd sge el. (if conformant) */
+	u8		sgeCnt;		/* only 8 bits required */
+	u8		bpTotal;	/* reserved for F/W use */
+	u8		bpComplete;	/* reserved for F/W use */
+	u8		sgeFirstIndex;	/* reserved for F/W use */
+	u8		unused[4];
+	struct sge_ieee1212	sge[1];
+};
+
 #define CT_FLUSH_CACHE 129
 struct aac_synchronize {
 	__le32		command;	/* VM_ContainerConfig */
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 0bd38da4ada0..1ef041bc60c8 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -498,6 +498,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		return -ENOMEM;
 	}
 	aac_fib_init(srbfib);
+	/* raw_srb FIB is not FastResponseCapable */
+	srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);
 
 	srbcmd = (struct aac_srb*) fib_data(srbfib);
 
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index a35f54ebdce0..8e5d3be16127 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -58,7 +58,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	dma_addr_t phys;
 	unsigned long aac_max_hostphysmempages;
 
-	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1)
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2)
 		host_rrq_size = (dev->scsi_host_ptr->can_queue
 			+ AAC_NUM_MGT_FIB) * sizeof(u32);
 	size = fibsize + sizeof(struct aac_init) + commsize +
@@ -75,7 +76,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	dev->comm_phys = phys;
 	dev->comm_size = size;
 
-	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
 		dev->host_rrq = (u32 *)(base + fibsize);
 		dev->host_rrq_pa = phys + fibsize;
 		memset(dev->host_rrq, 0, host_rrq_size);
@@ -115,26 +117,32 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	else
 		init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
 
-	init->InitFlags = 0;
+	init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
+		INITFLAGS_DRIVER_SUPPORTS_PM);
+	init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+	init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
+	init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
+	init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
+
 	if (dev->comm_interface == AAC_COMM_MESSAGE) {
 		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
 		dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
 	} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
 		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
-		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED);
-		dprintk((KERN_WARNING
-			"aacraid: New Comm Interface type1 enabled\n"));
+		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+			INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
+		init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
+		init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
+		dprintk((KERN_WARNING"aacraid: New Comm Interface type1 enabled\n"));
+	} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
+		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+			INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
+		init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
+		init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
+		init->MiniPortRevision = cpu_to_le32(0L);	/* number of MSI-X */
+		dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n"));
 	}
-	init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
-		INITFLAGS_DRIVER_SUPPORTS_PM);
-	init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
-	init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
-	init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
-
-	init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
-	init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32);
-	init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff);
-
 
 	/*
 	 * Increment the base address by the amount already used
@@ -354,13 +362,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
 			/* driver supports TYPE1 (Tupelo) */
 			dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+		} else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
+			/* driver supports TYPE2 (Denali) */
+			dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
 		} else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
-			  (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3)) ||
-			  (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
-			/* driver doesn't support TYPE2 (Series7), TYPE3 and TYPE4 */
-			/* switch to sync. mode */
-			dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+			  (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) {
+			/* driver doesn't support TYPE3 and TYPE4 */
+			/* switch to sync. mode */
+			dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
 			dev->sync_mode = 1;
 		}
 	}
 	if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 4b32ca442433..1be0776a80c4 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -136,6 +136,7 @@ int aac_fib_setup(struct aac_dev * dev)
 	     i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
 	     i++, fibptr++)
 	{
+		fibptr->flags = 0;
 		fibptr->dev = dev;
 		fibptr->hw_fib_va = hw_fib;
 		fibptr->data = (void *) fibptr->hw_fib_va->data;
@@ -240,11 +241,11 @@ void aac_fib_init(struct fib *fibptr)
 {
 	struct hw_fib *hw_fib = fibptr->hw_fib_va;
 
+	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
 	hw_fib->header.StructType = FIB_MAGIC;
 	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
 	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
-	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
-	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
+	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
 	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
 }
 
@@ -259,7 +260,6 @@ void aac_fib_init(struct fib *fibptr)
 static void fib_dealloc(struct fib * fibptr)
 {
 	struct hw_fib *hw_fib = fibptr->hw_fib_va;
-	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
 	hw_fib->header.XferState = 0;
 }
 
@@ -370,7 +370,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
 		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
 		entry->addr = hw_fib->header.SenderFibAddress;
 		/* Restore adapters pointer to the FIB */
-		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
+		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
 		map = 0;
 	}
 	/*
@@ -450,7 +450,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	 */
 
 	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
-	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
+	hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
 	/*
 	 *	Set FIB state to indicate where it came from and if we want a
 	 *	response from the adapter. Also load the command from the
@@ -460,7 +460,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	 */
 	hw_fib->header.Command = cpu_to_le16(command);
 	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
-	fibptr->hw_fib_va->header.Flags = 0;	/* 0 the flags field - internal only*/
 	/*
 	 *	Set the size of the Fib we want to send to the adapter
 	 */
@@ -564,10 +563,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 		 *	functioning because an interrupt routing or other
 		 *	hardware failure has occurred.
 		 */
-		unsigned long count = 36000000L; /* 3 minutes */
+		unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
 		while (down_trylock(&fibptr->event_wait)) {
 			int blink;
-			if (--count == 0) {
+			if (time_is_before_eq_jiffies(timeout)) {
 				struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
 				spin_lock_irqsave(q->lock, qflags);
 				q->numpending--;
@@ -588,7 +587,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 				}
 				return -EFAULT;
 			}
-			udelay(5);
+			/* We used to udelay() here but that absorbed
+			 * a CPU when a timeout occurred. Not very
+			 * useful. */
+			cpu_relax();
 		}
 	} else if (down_interruptible(&fibptr->event_wait)) {
 		/* Do nothing ... satisfy
@@ -708,7 +710,8 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 	unsigned long nointr = 0;
 	unsigned long qflags;
 
-	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
 		kfree(hw_fib);
 		return 0;
 	}
@@ -721,7 +724,9 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 	/*
 	 *	If we plan to do anything check the structure type first.
 	 */
-	if (hw_fib->header.StructType != FIB_MAGIC) {
+	if (hw_fib->header.StructType != FIB_MAGIC &&
+	    hw_fib->header.StructType != FIB_MAGIC2 &&
+	    hw_fib->header.StructType != FIB_MAGIC2_64) {
 		if (dev->comm_interface == AAC_COMM_MESSAGE)
 			kfree(hw_fib);
 		return -EINVAL;
@@ -783,7 +788,9 @@ int aac_fib_complete(struct fib *fibptr)
 	 *	If we plan to do anything check the structure type first.
 	 */
 
-	if (hw_fib->header.StructType != FIB_MAGIC)
+	if (hw_fib->header.StructType != FIB_MAGIC &&
+	    hw_fib->header.StructType != FIB_MAGIC2 &&
+	    hw_fib->header.StructType != FIB_MAGIC2_64)
 		return -EINVAL;
 	/*
 	 *	This block completes a cdb which originated on the host and we
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index f0c66a80ad13..d81b2810f0f7 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -101,6 +101,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
 		 */
 		*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
 		hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+		fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
 	}
 
 	FIB_COUNTER_INCREMENT(aac_config.FibRecved);
@@ -121,7 +122,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
 			 *	NOTE:  we cannot touch the fib after this
 			 *	call, because it may have been deallocated.
 			 */
-			fib->flags = 0;
+			fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
 			fib->callback(fib->callback_data, fib);
 		} else {
 			unsigned long flagv;
@@ -367,6 +368,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
 		 */
 		*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
 		hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+		fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
 	}
 
 	FIB_COUNTER_INCREMENT(aac_config.FibRecved);
@@ -387,7 +389,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
 			 *	NOTE:  we cannot touch the fib after this
 			 *	call, because it may have been deallocated.
 			 */
-			fib->flags = 0;
+			fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
 			fib->callback(fib->callback_data, fib);
 		} else {
 			unsigned long flagv;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0d279c445a30..7199534cd07d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1089,8 +1089,17 @@ static struct scsi_host_template aac_driver_template = {
 
 static void __aac_shutdown(struct aac_dev * aac)
 {
-	if (aac->aif_thread)
+	if (aac->aif_thread) {
+		int i;
+		/* Clear out events first */
+		for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
+			struct fib *fib = &aac->fibs[i];
+			if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+			    (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
+				up(&fib->event_wait);
+		}
 		kthread_stop(aac->thread);
+	}
 	aac_send_shutdown(aac);
 	aac_adapter_disable_int(aac);
 	free_irq(aac->pdev->irq, aac);
@@ -1145,11 +1154,11 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 		goto out_disable_pdev;
 
 	shost->irq = pdev->irq;
-	shost->base = pci_resource_start(pdev, 0);
 	shost->unique_id = unique_id;
 	shost->max_cmd_len = 16;
 
 	aac = (struct aac_dev *)shost->hostdata;
+	aac->base_start = pci_resource_start(pdev, 0);
 	aac->scsi_host_ptr = shost;
 	aac->pdev = pdev;
 	aac->name = aac_driver_template.name;
@@ -1157,7 +1166,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	aac->cardtype = index;
 	INIT_LIST_HEAD(&aac->entry);
 
-	aac->fibs = kmalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
+	aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
 	if (!aac->fibs)
 		goto out_free_host;
 	spin_lock_init(&aac->fib_lock);
@@ -1191,6 +1200,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	if (IS_ERR(aac->thread)) {
 		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
 		error = PTR_ERR(aac->thread);
+		aac->thread = NULL;
 		goto out_deinit;
 	}
 
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index f397d21a0c06..6c53b1d8b2ba 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -49,14 +49,14 @@ static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
 		dev->base = NULL;
 		return 0;
 	}
-	dev->scsi_host_ptr->base = pci_resource_start(dev->pdev, 2);
+	dev->base_start = pci_resource_start(dev->pdev, 2);
 	dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) |
 	  ((u64)pci_resource_start(dev->pdev, 1) << 32),
 	  sizeof(struct rx_registers) - sizeof(struct rx_inbound));
 	dev->base = NULL;
 	if (dev->regs.rx == NULL)
 		return -1;
-	dev->base = ioremap(dev->scsi_host_ptr->base, size);
+	dev->base = ioremap(dev->base_start, size);
 	if (dev->base == NULL) {
 		iounmap(dev->regs.rx);
 		dev->regs.rx = NULL;
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index be44de92429a..7d8013feedde 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -79,7 +79,7 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
 		iounmap(dev->regs.rkt);
 		return 0;
 	}
-	dev->base = dev->regs.rkt = ioremap(dev->scsi_host_ptr->base, size);
+	dev->base = dev->regs.rkt = ioremap(dev->base_start, size);
 	if (dev->base == NULL)
 		return -1;
 	dev->IndexRegs = &dev->regs.rkt->IndexRegs;
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index b029c7cc785b..dada38aeacc0 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -471,7 +471,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
 		iounmap(dev->regs.rx);
 		return 0;
 	}
-	dev->base = dev->regs.rx = ioremap(dev->scsi_host_ptr->base, size);
+	dev->base = dev->regs.rx = ioremap(dev->base_start, size);
 	if (dev->base == NULL)
 		return -1;
 	dev->IndexRegs = &dev->regs.rx->IndexRegs;
@@ -653,7 +653,7 @@ int _aac_rx_init(struct aac_dev *dev)
 			name, instance);
 		goto error_iounmap;
 	}
-	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base = dev->base_start;
 	dev->dbg_base_mapped = dev->base;
 	dev->dbg_size = dev->base_size;
 
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index beb533630d4b..2244f315f33b 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -305,7 +305,7 @@ static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
 		iounmap(dev->regs.sa);
 		return 0;
 	}
-	dev->base = dev->regs.sa = ioremap(dev->scsi_host_ptr->base, size);
+	dev->base = dev->regs.sa = ioremap(dev->base_start, size);
 	return (dev->base == NULL) ? -1 : 0;
 }
 
@@ -393,7 +393,7 @@ int aac_sa_init(struct aac_dev *dev)
 			name, instance);
 		goto error_iounmap;
 	}
-	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base = dev->base_start;
 	dev->dbg_base_mapped = dev->base;
 	dev->dbg_size = dev->base_size;
 
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 762820636304..3b021ec63255 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -56,25 +56,14 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
 	if (bellbits & PmDoorBellResponseSent) {
 		bellbits = PmDoorBellResponseSent;
 		/* handle async. status */
+		src_writel(dev, MUnit.ODR_C, bellbits);
+		src_readl(dev, MUnit.ODR_C);
 		our_interrupt = 1;
 		index = dev->host_rrq_idx;
-		if (dev->host_rrq[index] == 0) {
-			u32 old_index = index;
-			/* adjust index */
-			do {
-				index++;
-				if (index == dev->scsi_host_ptr->can_queue +
-							AAC_NUM_MGT_FIB)
-					index = 0;
-				if (dev->host_rrq[index] != 0)
-					break;
-			} while (index != old_index);
-			dev->host_rrq_idx = index;
-		}
 		for (;;) {
 			isFastResponse = 0;
 			/* remove toggle bit (31) */
-			handle = (dev->host_rrq[index] & 0x7fffffff);
+			handle = le32_to_cpu(dev->host_rrq[index]) & 0x7fffffff;
 			/* check fast response bit (30) */
 			if (handle & 0x40000000)
 				isFastResponse = 1;
@@ -93,6 +82,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
 	} else {
 		bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
 		if (bellbits_shifted & DoorBellAifPending) {
+			src_writel(dev, MUnit.ODR_C, bellbits);
+			src_readl(dev, MUnit.ODR_C);
 			our_interrupt = 1;
 			/* handle AIF */
 			aac_intr_normal(dev, 0, 2, 0, NULL);
@@ -100,6 +91,13 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
 			unsigned long sflags;
 			struct list_head *entry;
 			int send_it = 0;
+			extern int aac_sync_mode;
+
+			if (!aac_sync_mode) {
+				src_writel(dev, MUnit.ODR_C, bellbits);
+				src_readl(dev, MUnit.ODR_C);
+				our_interrupt = 1;
+			}
 
 			if (dev->sync_fib) {
 				our_interrupt = 1;
@@ -132,7 +130,6 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
 	}
 
 	if (our_interrupt) {
-		src_writel(dev, MUnit.ODR_C, bellbits);
 		return IRQ_HANDLED;
 	}
 	return IRQ_NONE;
@@ -336,6 +333,9 @@ static void aac_src_start_adapter(struct aac_dev *dev)
 {
 	struct aac_init *init;
 
+	/* reset host_rrq_idx first */
+	dev->host_rrq_idx = 0;
+
 	init = dev->init;
 	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
 
@@ -389,30 +389,51 @@ static int aac_src_deliver_message(struct fib *fib)
 	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
 	unsigned long qflags;
 	u32 fibsize;
-	u64 address;
+	dma_addr_t address;
 	struct aac_fib_xporthdr *pFibX;
+	u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
 
 	spin_lock_irqsave(q->lock, qflags);
 	q->numpending++;
 	spin_unlock_irqrestore(q->lock, qflags);
 
-	/* Calculate the amount to the fibsize bits */
-	fibsize = (sizeof(struct aac_fib_xporthdr) +
-		fib->hw_fib_va->header.Size + 127) / 128 - 1;
-	if (fibsize > (ALIGN32 - 1))
-		fibsize = ALIGN32 - 1;
-
-	/* Fill XPORT header */
-	pFibX = (struct aac_fib_xporthdr *)
-		((unsigned char *)fib->hw_fib_va -
-		sizeof(struct aac_fib_xporthdr));
-	pFibX->Handle = fib->hw_fib_va->header.SenderData + 1;
-	pFibX->HostAddress = fib->hw_fib_pa;
-	pFibX->Size = fib->hw_fib_va->header.Size;
-	address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr);
-
-	src_writel(dev, MUnit.IQ_H, (u32)(address >> 32));
-	src_writel(dev, MUnit.IQ_L, (u32)(address & 0xffffffff) + fibsize);
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+		/* Calculate the amount to the fibsize bits */
+		fibsize = (hdr_size + 127) / 128 - 1;
+		if (fibsize > (ALIGN32 - 1))
+			return -EMSGSIZE;
+		/* New FIB header, 32-bit */
+		address = fib->hw_fib_pa;
+		fib->hw_fib_va->header.StructType = FIB_MAGIC2;
+		fib->hw_fib_va->header.SenderFibAddress = (u32)address;
+		fib->hw_fib_va->header.u.TimeStamp = 0;
+		BUG_ON((u32)(address >> 32) != 0L);
+		address |= fibsize;
+	} else {
+		/* Calculate the amount to the fibsize bits */
+		fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1;
+		if (fibsize > (ALIGN32 - 1))
+			return -EMSGSIZE;
+
+		/* Fill XPORT header */
+		pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr);
+		pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle);
+		pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa);
+		pFibX->Size = cpu_to_le32(hdr_size);
+
+		/*
+		 * The xport header has been 32-byte aligned for us so that fibsize
+		 * can be masked out of this address by hardware. -- BenC
+		 */
+		address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr);
+		if (address & (ALIGN32 - 1))
+			return -EINVAL;
+		address |= fibsize;
+	}
+
+	src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff);
+	src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
 
 	return 0;
 }
 
@@ -435,8 +456,7 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
 	dev->base = NULL;
 	if (dev->regs.src.bar1 == NULL)
 		return -1;
-	dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base,
-						 size);
+	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
 	if (dev->base == NULL) {
 		iounmap(dev->regs.src.bar1);
 		dev->regs.src.bar1 = NULL;
@@ -459,7 +479,7 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
 		dev->base = dev->regs.src.bar0 = NULL;
 		return 0;
 	}
-	dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, size);
+	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
 	if (dev->base == NULL)
 		return -1;
 	dev->IndexRegs = &((struct src_registers __iomem *)
@@ -753,7 +773,7 @@ int aac_srcv_init(struct aac_dev *dev)
 
 	if (aac_init_adapter(dev) == NULL)
 		goto error_iounmap;
-	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
+	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
 		goto error_iounmap;
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
@@ -764,7 +784,7 @@ int aac_srcv_init(struct aac_dev *dev)
 			name, instance);
 		goto error_iounmap;
 	}
-	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base = dev->base_start;
 	dev->dbg_base_mapped = dev->base;
 	dev->dbg_size = dev->base_size;
 
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 19a36945e6fd..dd4547bf6881 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -2984,8 +2984,8 @@ static int get_command(char *pos, Scsi_Cmnd * ptr)
 	char *start = pos;
 	int i;
 
-	SPRINTF("0x%08x: target=%d; lun=%d; cmnd=( ",
-		(unsigned int) ptr, ptr->device->id, ptr->device->lun);
+	SPRINTF("%p: target=%d; lun=%d; cmnd=( ",
+		ptr, ptr->device->id, ptr->device->lun);
 
 	for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++)
 		SPRINTF("0x%02x ", ptr->cmnd[i]);
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index f79c8f9e33a4..770c48ddbe5e 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -49,7 +49,7 @@
 #define SCSI_BUF_PA(address)	isa_virt_to_bus(address)
 #define SCSI_SG_PA(sgent)	(isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
 
-#include<linux/stat.h>
+#include <linux/stat.h>
 
 #ifdef DEBUG
 #define DEB(x) x
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 532d212b6b2c..393e7ce8e95a 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
 
 	if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
 		resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
-		memcpy(&resp->ending_fis[0], r+16, 24);
+		memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
 		ts->buf_valid_size = sizeof(*resp);
 	}
 }
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index cbde1dca45ad..def24a1079ad 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2821,7 +2821,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
 	int i, count = 0;
 	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
 	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
-	u32 temp = 0;
+
 	/* backup pci config data */
 	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
 	for (i = 0; i < 64; i++) {
@@ -2839,7 +2839,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
 			writel(0x2, &pmuC->write_sequence);
 			writel(0x7, &pmuC->write_sequence);
 			writel(0xD, &pmuC->write_sequence);
-		} while ((((temp = readl(&pmuC->host_diagnostic)) | ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
+		} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
 		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
 	} else {
 		pci_write_config_byte(acb->pdev, 0x84, 0x20);
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 937000db62a8..bcc4966e8ba4 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5722,9 +5722,7 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
 	 * The memory for the bfad_vport_s is freed from the FC function
 	 * template vport_delete entry point.
 	 */
-	if (vport_drv)
-		bfad_im_port_delete(vport_drv->drv_port.bfad,
-				    &vport_drv->drv_port);
+	bfad_im_port_delete(vport_drv->drv_port.bfad, &vport_drv->drv_port);
 }
 
 /*
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 14e6284e48e4..8cdb79c2fcdf 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -2357,7 +2357,7 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
 		return;
 
 
-	if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
 		return;
 
 	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 2e4b0be14a20..2c8f0c713076 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1383,6 +1383,8 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	bfa_sm_set_state(bfad, bfad_sm_uninit);
 
 	spin_lock_init(&bfad->bfad_lock);
+	spin_lock_init(&bfad->bfad_aen_spinlock);
+
 	pci_set_drvdata(pdev, bfad);
 
 	bfad->ref_count = 0;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index e1f4b10df42a..9c1495b321d9 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3008,12 +3008,15 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
 	 * buffer of size bsg_data->payload_len
 	 */
 	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
-	if (!bsg_fcpt)
+	if (!bsg_fcpt) {
+		rc = -ENOMEM;
 		goto out;
+	}
 
 	if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
 			bsg_data->payload_len)) {
 		kfree(bsg_fcpt);
+		rc = -EIO;
 		goto out;
 	}
 
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 1ac09afe35ee..2eebf8d4d58b 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -687,25 +687,21 @@ bfa_status_t
 bfad_im_probe(struct bfad_s *bfad)
 {
 	struct bfad_im_s *im;
-	bfa_status_t rc = BFA_STATUS_OK;
 
 	im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
-	if (im == NULL) {
-		rc = BFA_STATUS_ENOMEM;
-		goto ext;
-	}
+	if (im == NULL)
+		return BFA_STATUS_ENOMEM;
 
 	bfad->im = im;
 	im->bfad = bfad;
 
 	if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
 		kfree(im);
-		rc = BFA_STATUS_FAILED;
+		return BFA_STATUS_FAILED;
 	}
 
 	INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
-ext:
-	return rc;
+	return BFA_STATUS_OK;
 }
 
 void
diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile
index a92695a25176..141149e8cdad 100644
--- a/drivers/scsi/bnx2fc/Makefile
+++ b/drivers/scsi/bnx2fc/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
 
-bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o
+bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o \
+	    bnx2fc_debug.o
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 0578fa0dc14b..3486845ba301 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -11,6 +11,8 @@
  * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
@@ -57,12 +59,12 @@
 #include <scsi/fc/fc_fcp.h>
 
 #include "57xx_hsi_bnx2fc.h"
-#include "bnx2fc_debug.h"
 #include "../../net/ethernet/broadcom/cnic_if.h"
+#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.11"
+#define BNX2FC_VERSION "1.0.12"
 
 #define PFX "bnx2fc: "
 
@@ -84,6 +86,8 @@
 #define BNX2FC_NUM_MAX_SESS 1024
 #define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
 
+#define BNX2FC_MAX_NPIV 256
+
 #define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
 #define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS
 #define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE
@@ -206,6 +210,7 @@ struct bnx2fc_hba {
 	struct fcoe_statistics_params *stats_buffer;
 	dma_addr_t stats_buf_dma;
 	struct completion stat_req_done;
+	struct fcoe_capabilities fcoe_cap;
 
 	/*destroy handling */
 	struct timer_list destroy_timer;
@@ -274,6 +279,7 @@ struct bnx2fc_rport {
 #define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6
 #define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
 #define BNX2FC_FLAG_EXPL_LOGO 0x8
+#define BNX2FC_FLAG_DISABLE_FAILED 0x9
 
 	u8 src_addr[ETH_ALEN];
 	u32 max_sqes;
@@ -554,4 +560,7 @@ void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
 int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
 				enum fc_rctl r_ctl);
 
+
+#include "bnx2fc_debug.h"
+
 #endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c
new file mode 100644
index 000000000000..0cbee1b23ee2
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c
@@ -0,0 +1,70 @@
+#include "bnx2fc.h"
+
+void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	if (likely(!(bnx2fc_debug_level & LOG_IO)))
+		return;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	if (io_req && io_req->port && io_req->port->lport &&
+	    io_req->port->lport->host)
+		shost_printk(KERN_INFO, io_req->port->lport->host,
+			     PFX "xid:0x%x %pV",
+			     io_req->xid, &vaf);
+	else
+		pr_info("NULL %pV", &vaf);
+
+	va_end(args);
+}
+
+void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	if (likely(!(bnx2fc_debug_level & LOG_TGT)))
+		return;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host &&
+	    tgt->rport)
+		shost_printk(KERN_INFO, tgt->port->lport->host,
+			     PFX "port:%x %pV",
+			     tgt->rport->port_id, &vaf);
+	else
+		pr_info("NULL %pV", &vaf);
+
+	va_end(args);
+}
+
+void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	if (likely(!(bnx2fc_debug_level & LOG_HBA)))
+		return;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	if (lport && lport->host)
+		shost_printk(KERN_INFO, lport->host, PFX "%pV", &vaf);
+	else
+		pr_info("NULL %pV", &vaf);
+
+	va_end(args);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
index 3416d9a746c7..4808ff99621f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -11,60 +11,23 @@
 
 extern unsigned int bnx2fc_debug_level;
 
-#define BNX2FC_CHK_LOGGING(LEVEL, CMD)				\
-	do {							\
-		if (unlikely(bnx2fc_debug_level & LEVEL))	\
-			do {					\
-				CMD;				\
-			} while (0);				\
-	} while (0)
-
-#define BNX2FC_ELS_DBG(fmt, arg...)				\
-	BNX2FC_CHK_LOGGING(LOG_ELS,				\
-			   printk(KERN_INFO PFX fmt, ##arg))
-
-#define BNX2FC_MISC_DBG(fmt, arg...)				\
-	BNX2FC_CHK_LOGGING(LOG_MISC,				\
-			   printk(KERN_INFO PFX fmt, ##arg))
-
-#define BNX2FC_IO_DBG(io_req, fmt, arg...)				\
-	do {								\
-		if (!io_req || !io_req->port || !io_req->port->lport ||	\
-		    !io_req->port->lport->host)				\
-			BNX2FC_CHK_LOGGING(LOG_IO,			\
-				printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
-		else							\
-			BNX2FC_CHK_LOGGING(LOG_IO,			\
-				shost_printk(KERN_INFO,			\
-					(io_req)->port->lport->host,	\
-					PFX "xid:0x%x " fmt,		\
-					(io_req)->xid, ##arg));		\
-	} while (0)
-
-#define BNX2FC_TGT_DBG(tgt, fmt, arg...)				\
-	do {								\
-		if (!tgt || !tgt->port || !tgt->port->lport ||		\
-		    !tgt->port->lport->host || !tgt->rport)		\
-			BNX2FC_CHK_LOGGING(LOG_TGT,			\
-				printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
-		else							\
-			BNX2FC_CHK_LOGGING(LOG_TGT,			\
-				shost_printk(KERN_INFO,			\
-					(tgt)->port->lport->host,	\
-					PFX "port:%x " fmt,		\
-					(tgt)->rport->port_id, ##arg));	\
-	} while (0)
-
-
-#define BNX2FC_HBA_DBG(lport, fmt, arg...)				\
-	do {								\
-		if (!lport || !lport->host)				\
-			BNX2FC_CHK_LOGGING(LOG_HBA,			\
-				printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
-		else							\
-			BNX2FC_CHK_LOGGING(LOG_HBA,			\
-				shost_printk(KERN_INFO, lport->host,	\
-					PFX fmt, ##arg));		\
-	} while (0)
-
+#define BNX2FC_ELS_DBG(fmt, ...)				\
+do {								\
+	if (unlikely(bnx2fc_debug_level & LOG_ELS))		\
+		pr_info(fmt, ##__VA_ARGS__);			\
+} while (0)
+
+#define BNX2FC_MISC_DBG(fmt, ...)				\
+do {								\
+	if (unlikely(bnx2fc_debug_level & LOG_MISC))		\
+		pr_info(fmt, ##__VA_ARGS__);			\
+} while (0)
+
+__printf(2, 3)
+void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...);
+__printf(2, 3)
+void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...);
+__printf(2, 3)
+void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...);
 
 #endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f52f668fd247..ae1cb7639d99 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME "bnx2fc"
 #define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Apr 24, 2012"
+#define DRV_MODULE_RELDATE "Jun 04, 2012"
 
 
 static char version[] __devinitdata =
@@ -286,7 +286,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	struct fcoe_port *port;
 	struct fcoe_hdr *hp;
 	struct bnx2fc_rport *tgt;
-	struct fcoe_dev_stats *stats;
+	struct fc_stats *stats;
 	u8 sof, eof;
 	u32 crc;
 	unsigned int hlen, tlen, elen;
@@ -412,7 +412,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	}
 
 	/*update tx stats */
-	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu());
 	stats->TxFrames++;
 	stats->TxWords += wlen;
 	put_cpu();
@@ -522,7 +522,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 	u32 fr_len;
 	struct fc_lport *lport;
 	struct fcoe_rcv_info *fr;
-	struct fcoe_dev_stats *stats;
+	struct fc_stats *stats;
 	struct fc_frame_header *fh;
 	struct fcoe_crc_eof crc_eof;
 	struct fc_frame *fp;
@@ -551,7 +551,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 	skb_pull(skb, sizeof(struct fcoe_hdr));
 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
 
-	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu());
 	stats->RxFrames++;
 	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
 
@@ -942,7 +942,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 					FC_PORTTYPE_UNKNOWN;
 			mutex_unlock(&lport->lp_mutex);
 			fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
-			per_cpu_ptr(lport->dev_stats,
+			per_cpu_ptr(lport->stats,
 				    get_cpu())->LinkFailureCount++;
 			put_cpu();
 			fcoe_clean_pending_queue(lport);
@@ -1326,6 +1326,7 @@ static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
 static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
 {
 	struct bnx2fc_hba *hba;
+	struct fcoe_capabilities *fcoe_cap;
 	int rc;
 
 	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
@@ -1361,6 +1362,21 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
 		printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
 		goto cmgr_err;
 	}
+	fcoe_cap = &hba->fcoe_cap;
+
+	fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES <<
+					FCOE_IOS_PER_CONNECTION_SHIFT;
+	fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
+					FCOE_LOGINS_PER_PORT_SHIFT;
+	fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS <<
+					FCOE_NUMBER_OF_EXCHANGES_SHIFT;
+	fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
+					FCOE_NPIV_WWN_PER_PORT_SHIFT;
+	fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
+					FCOE_TARGETS_SUPPORTED_SHIFT;
+	fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS <<
+					FCOE_OUTSTANDING_COMMANDS_SHIFT;
+	fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
 
 	init_waitqueue_head(&hba->shutdown_wait);
 	init_waitqueue_head(&hba->destroy_wait);
@@ -1691,6 +1707,32 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
 	hba->pcidev = NULL;
 }
 
+/**
+ * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
+ *
+ * @handle: transport handle pointing to adapter structure
+ */
+static int bnx2fc_ulp_get_stats(void *handle)
+{
+	struct bnx2fc_hba *hba = handle;
+	struct cnic_dev *cnic;
+	struct fcoe_stats_info *stats_addr;
+
+	if (!hba)
+		return -EINVAL;
+
+	cnic = hba->cnic;
+	stats_addr = &cnic->stats_addr->fcoe_stat;
+	if (!stats_addr)
+		return -EINVAL;
+
+	strncpy(stats_addr->version, BNX2FC_VERSION,
+		sizeof(stats_addr->version));
+	stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
+	stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
+
+	return 0;
+}
 
 
 /**
@@ -1944,6 +1986,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
 	adapter_count++;
 	mutex_unlock(&bnx2fc_dev_lock);
 
+	dev->fcoe_cap = &hba->fcoe_cap;
 	clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
 	rc = dev->register_device(dev, CNIC_ULP_FCOE,
 						(void *) hba);
@@ -2019,11 +2062,11 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 	struct fcoe_ctlr *ctlr;
 	struct bnx2fc_interface *interface;
 	struct bnx2fc_hba *hba;
-	struct net_device *phys_dev;
+	struct net_device *phys_dev = netdev;
 	struct fc_lport *lport;
 	struct ethtool_drvinfo drvinfo;
 	int rc = 0;
-	int vlan_id;
+	int vlan_id = 0;
 
 	BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
 	if (fip_mode != FIP_MODE_FABRIC) {
@@ -2041,14 +2084,9 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 	}
 
 	/* obtain physical netdev */
-	if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+	if (netdev->priv_flags & IFF_802_1Q_VLAN)
 		phys_dev = vlan_dev_real_dev(netdev);
-		vlan_id = vlan_dev_vlan_id(netdev);
-	} else {
-		printk(KERN_ERR PFX "Not a vlan device\n");
-		rc = -EINVAL;
-		goto netdev_err;
-	}
+
 	/* verify if the physical device is a netxtreme2 device */
 	if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
 		memset(&drvinfo, 0, sizeof(drvinfo));
@@ -2083,9 +2121,13 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 		goto ifput_err;
 	}
 
+	if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+		vlan_id = vlan_dev_vlan_id(netdev);
+		interface->vlan_enabled = 1;
+	}
+
 	ctlr = bnx2fc_to_ctlr(interface);
 	interface->vlan_id = vlan_id;
-	interface->vlan_enabled = 1;
 
 	interface->timer_work_queue =
 			create_singlethread_workqueue("bnx2fc_timer_wq");
@@ -2152,13 +2194,10 @@ mod_err:
  **/
 static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
 {
-	struct list_head *list;
-	struct list_head *temp;
 	struct bnx2fc_hba *hba;
 
 	/* Called with bnx2fc_dev_lock held */
-	list_for_each_safe(list, temp, &adapter_list) {
-		hba = (struct bnx2fc_hba *)list;
+	list_for_each_entry(hba, &adapter_list, list) {
 		if (hba->cnic == cnic)
 			return hba;
 	}
@@ -2252,15 +2291,17 @@ static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
 
 static bool bnx2fc_match(struct net_device *netdev)
 {
+	struct net_device *phys_dev = netdev;
+
 	mutex_lock(&bnx2fc_dev_lock);
-	if (netdev->priv_flags & IFF_802_1Q_VLAN) {
-		struct net_device *phys_dev = vlan_dev_real_dev(netdev);
-
-		if (bnx2fc_hba_lookup(phys_dev)) {
-			mutex_unlock(&bnx2fc_dev_lock);
-			return true;
-		}
+	if (netdev->priv_flags & IFF_802_1Q_VLAN)
+		phys_dev = vlan_dev_real_dev(netdev);
+
+	if (bnx2fc_hba_lookup(phys_dev)) {
+		mutex_unlock(&bnx2fc_dev_lock);
+		return true;
 	}
+
 	mutex_unlock(&bnx2fc_dev_lock);
 	return false;
 }
@@ -2290,9 +2331,9 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
 
 	p = &per_cpu(bnx2fc_percpu, cpu);
 
-	thread = kthread_create(bnx2fc_percpu_io_thread,
-				(void *)p,
-				"bnx2fc_thread/%d", cpu);
+	thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
+					(void *)p, cpu_to_node(cpu),
+					"bnx2fc_thread/%d", cpu);
 	/* bind thread to the cpu */
 	if (likely(!IS_ERR(thread))) {
 		kthread_bind(thread, cpu);
@@ -2643,4 +2684,5 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb = {
 	.cnic_stop = bnx2fc_ulp_stop,
 	.indicate_kcqes = bnx2fc_indicate_kcqe,
 	.indicate_netevent = bnx2fc_indicate_netevent,
+	.cnic_get_stats = bnx2fc_ulp_get_stats,
 };
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 2ca6bfe4ce5e..6d6eee42ac7d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1244,7 +1244,9 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
 	if (disable_kcqe->completion_status) {
 		printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
 			disable_kcqe->completion_status);
-		return;
+		set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
+		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+		wake_up_interruptible(&tgt->upld_wait);
 	} else {
 		/* disable successful */
 		BNX2FC_TGT_DBG(tgt, "disable successful\n");
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4f7453b9e41e..73f231ccd45b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -405,11 +405,10 @@ free_cmd_pool:
 		goto free_cmgr;
 
 	for (i = 0; i < num_possible_cpus() + 1; i++) {
-		struct list_head *list;
-		struct list_head *tmp;
+		struct bnx2fc_cmd *tmp, *io_req;
 
-		list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
-			struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
+		list_for_each_entry_safe(io_req, tmp,
+					 &cmgr->free_list[i], link) {
 			list_del(&io_req->link);
 			kfree(io_req);
 		}
@@ -1436,9 +1435,7 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
 {
 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
 	struct bnx2fc_rport *tgt = io_req->tgt;
-	struct list_head *list;
-	struct list_head *tmp;
-	struct bnx2fc_cmd *cmd;
+	struct bnx2fc_cmd *cmd, *tmp;
 	int tm_lun = sc_cmd->device->lun;
 	int rc = 0;
 	int lun;
@@ -1449,9 +1446,8 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
 	 * Walk thru the active_ios queue and ABORT the IO
 	 * that matches with the LUN that was reset
 	 */
-	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
 		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
-		cmd = (struct bnx2fc_cmd *)list;
 		lun = cmd->sc_cmd->device->lun;
 		if (lun == tm_lun) {
 			/* Initiate ABTS on this cmd */
@@ -1476,9 +1472,7 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
 static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
 {
 	struct bnx2fc_rport *tgt = io_req->tgt;
-	struct list_head *list;
-	struct list_head *tmp;
-	struct bnx2fc_cmd *cmd;
+	struct bnx2fc_cmd *cmd, *tmp;
 	int rc = 0;
 
 	/* called with tgt_lock held */
@@ -1487,9 +1481,8 @@ static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
 	 * Walk thru the active_ios queue and ABORT the IO
 	 * that matches with the LUN that was reset
 	 */
-	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
 		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
-		cmd = (struct bnx2fc_cmd *)list;
 		/* Initiate ABTS */
 		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
 				      &cmd->req_flags)) {
@@ -1980,7 +1973,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
 	struct bnx2fc_interface *interface = port->priv;
 	struct bnx2fc_hba *hba = interface->hba;
 	struct fc_lport *lport = port->lport;
-	struct fcoe_dev_stats *stats;
+	struct fc_stats *stats;
 	int task_idx, index;
 	u16 xid;
 
@@ -1991,7 +1984,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
 	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
 	sc_cmd->SCp.ptr = (char *)io_req;
 
-	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu());
 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		io_req->io_req_flags = BNX2FC_READ;
 		stats->InputRequests++;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 082a25c3117e..b9d0d9cb17f9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -150,8 +150,7 @@ tgt_init_err:
 void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 {
 	struct bnx2fc_cmd *io_req;
-	struct list_head *list;
-	struct list_head *tmp;
+	struct bnx2fc_cmd *tmp;
 	int rc;
 	int i = 0;
 	BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
@@ -160,9 +159,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 	spin_lock_bh(&tgt->tgt_lock);
 	tgt->flush_in_prog = 1;
 
-	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+	list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {
 		i++;
-		io_req = (struct bnx2fc_cmd *)list;
 		list_del_init(&io_req->link);
 		io_req->on_active_queue = 0;
 		BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
@@ -181,13 +179,18 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 
 		set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
 		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
-		rc = bnx2fc_initiate_cleanup(io_req);
-		BUG_ON(rc);
+
+		/* Do not issue cleanup when disable request failed */
+		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
+			bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
+		else {
+			rc = bnx2fc_initiate_cleanup(io_req);
+			BUG_ON(rc);
+		}
 	}
 
-	list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
+	list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) {
 		i++;
-		io_req = (struct bnx2fc_cmd *)list;
 		list_del_init(&io_req->link);
 		io_req->on_tmf_queue = 0;
 		BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
@@ -195,9 +198,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 		complete(&io_req->tm_done);
 	}
 
-	list_for_each_safe(list, tmp, &tgt->els_queue) {
+	list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
 		i++;
-		io_req = (struct bnx2fc_cmd *)list;
 		list_del_init(&io_req->link);
 		io_req->on_active_queue = 0;
 
@@ -212,13 +214,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 			io_req->cb_arg = NULL;
 		}
 
-		rc = bnx2fc_initiate_cleanup(io_req);
-		BUG_ON(rc);
+		/* Do not issue cleanup when disable request failed */
+		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
+			bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
+		else {
+			rc = bnx2fc_initiate_cleanup(io_req);
+			BUG_ON(rc);
+		}
 	}
 
-	list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
+	list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) {
 		i++;
-		io_req = (struct bnx2fc_cmd *)list;
 		list_del_init(&io_req->link);
 
 		BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
@@ -321,9 +327,13 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
 
 		del_timer_sync(&tgt->upld_timer);
 
-	} else
+	} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
+		printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
+			" not sent to FW\n");
+	} else {
 		printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
 			" not sent to FW\n");
+	}
 
 	/* Free session resources */
 	bnx2fc_free_session_resc(hba, tgt);
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index dc0a08e69c82..f2db5fe7bdc2 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -267,7 +267,13 @@ struct bnx2i_cmd_request {
  * task statistics for write response
  */
 struct bnx2i_write_resp_task_stat {
-	u32 num_data_ins;
+#if defined(__BIG_ENDIAN)
+	u16 num_r2ts;
+	u16 num_data_outs;
+#elif defined(__LITTLE_ENDIAN)
+	u16 num_data_outs;
+	u16 num_r2ts;
+#endif
 };
 
 /*
@@ -275,11 +281,11 @@ struct bnx2i_write_resp_task_stat {
  */
 struct bnx2i_read_resp_task_stat {
 #if defined(__BIG_ENDIAN)
-	u16 num_data_outs;
-	u16 num_r2ts;
+	u16 reserved;
+	u16 num_data_ins;
 #elif defined(__LITTLE_ENDIAN)
-	u16 num_r2ts;
-	u16 num_data_outs;
+	u16 num_data_ins;
+	u16 reserved;
 #endif
 };
 
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 0c53c28dc3d3..3f9e7061258e 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -44,6 +44,8 @@
 #include "57xx_iscsi_hsi.h"
 #include "57xx_iscsi_constants.h"
 
+#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
+
 #define BNX2_ISCSI_DRIVER_NAME "bnx2i"
 
 #define BNX2I_MAX_ADAPTERS 8
@@ -126,6 +128,43 @@
 #define REG_WR(__hba, offset, val) \
 		writel(val, __hba->regview + offset)
 
+#ifdef CONFIG_32BIT
+#define GET_STATS_64(__hba, dst, field)				\
+	do {							\
+		spin_lock_bh(&__hba->stat_lock);		\
+		dst->field##_lo = __hba->stats.field##_lo;	\
+		dst->field##_hi = __hba->stats.field##_hi;	\
+		spin_unlock_bh(&__hba->stat_lock);		\
+	} while (0)
+
+#define ADD_STATS_64(__hba, field, len)				\
+	do {							\
+		if (spin_trylock(&__hba->stat_lock)) {		\
+			if (__hba->stats.field##_lo + len <	\
+			    __hba->stats.field##_lo)		\
+				__hba->stats.field##_hi++;	\
+			__hba->stats.field##_lo += len;		\
+			spin_unlock(&__hba->stat_lock);		\
+		}						\
+	} while (0)
+
+#else
+#define GET_STATS_64(__hba, dst, field)				\
+	do {							\
+		u64 val, *out;					\
+								\
+		val = __hba->bnx2i_stats.field;			\
+		out = (u64 *)&__hba->stats.field##_lo;		\
+		*out = cpu_to_le64(val);			\
+		out = (u64 *)&dst->field##_lo;			\
+		*out = cpu_to_le64(val);			\
+	} while (0)
+
+#define ADD_STATS_64(__hba, field, len)				\
+	do {							\
+		__hba->bnx2i_stats.field += len;		\
+	} while (0)
+#endif
 
 /**
  * struct generic_pdu_resc - login pdu resource structure
@@ -288,6 +327,15 @@ struct iscsi_cid_queue {
 	struct bnx2i_conn **conn_cid_tbl;
 };
 
+
+struct bnx2i_stats_info {
+	u64 rx_pdus;
+	u64 rx_bytes;
+	u64 tx_pdus;
+	u64 tx_bytes;
+};
+
+
 /**
  * struct bnx2i_hba - bnx2i adapter structure
  *
@@ -341,6 +389,8 @@ struct iscsi_cid_queue {
  * @ctx_ccell_tasks:       captures number of ccells and tasks supported by
  *                         currently offloaded connection, used to decode
  *                         context memory
+ * @stat_lock:             spin lock used by the statistic collector (32 bit)
+ * @stats:                 local iSCSI statistic collection place holder
  *
  * Adapter Data Structure
  */
@@ -350,6 +400,7 @@ struct bnx2i_hba {
 	struct pci_dev *pcidev;
 	struct net_device *netdev;
 	void __iomem *regview;
+	resource_size_t reg_base;
 
 	u32 age;
 	unsigned long cnic_dev_type;
@@ -426,6 +477,12 @@ struct bnx2i_hba {
 	u32 num_sess_opened;
 	u32 num_conn_opened;
 	unsigned int ctx_ccell_tasks;
+
+#ifdef CONFIG_32BIT
+	spinlock_t stat_lock;
+#endif
+	struct bnx2i_stats_info bnx2i_stats;
+	struct iscsi_stats_info stats;
 };
 
 
@@ -749,6 +806,8 @@ extern void bnx2i_ulp_init(struct cnic_dev *dev);
 extern void bnx2i_ulp_exit(struct cnic_dev *dev);
 extern void bnx2i_start(void *handle);
 extern void bnx2i_stop(void *handle);
+extern int bnx2i_get_stats(void *handle);
+
 extern struct bnx2i_hba *get_adapter_list_head(void);
 
 struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index ece47e502282..33d6630529de 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1350,6 +1350,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
 				     struct cqe *cqe)
 {
 	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct bnx2i_hba *hba = bnx2i_conn->hba;
 	struct bnx2i_cmd_response *resp_cqe;
 	struct bnx2i_cmd *bnx2i_cmd;
 	struct iscsi_task *task;
@@ -1367,16 +1368,26 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
 
 	if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
 		conn->datain_pdus_cnt +=
-			resp_cqe->task_stat.read_stat.num_data_outs;
+			resp_cqe->task_stat.read_stat.num_data_ins;
 		conn->rxdata_octets +=
 			bnx2i_cmd->req.total_data_transfer_length;
+		ADD_STATS_64(hba, rx_pdus,
+			     resp_cqe->task_stat.read_stat.num_data_ins);
+		ADD_STATS_64(hba, rx_bytes,
+			     bnx2i_cmd->req.total_data_transfer_length);
 	} else {
 		conn->dataout_pdus_cnt +=
-			resp_cqe->task_stat.read_stat.num_data_outs;
+			resp_cqe->task_stat.write_stat.num_data_outs;
 		conn->r2t_pdus_cnt +=
-			resp_cqe->task_stat.read_stat.num_r2ts;
+			resp_cqe->task_stat.write_stat.num_r2ts;
 		conn->txdata_octets +=
 			bnx2i_cmd->req.total_data_transfer_length;
+		ADD_STATS_64(hba, tx_pdus,
+			     resp_cqe->task_stat.write_stat.num_data_outs);
+		ADD_STATS_64(hba, tx_bytes,
+			     bnx2i_cmd->req.total_data_transfer_length);
+		ADD_STATS_64(hba, rx_pdus,
+			     resp_cqe->task_stat.write_stat.num_r2ts);
 	}
 	bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
 
@@ -1961,6 +1972,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
 {
 	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
+	struct bnx2i_hba *hba = bnx2i_conn->hba;
 	struct qp_info *qp;
 	struct bnx2i_nop_in_msg *nopin;
 	int tgt_async_msg;
@@ -1973,7 +1985,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
 
 	if (!qp->cq_virt) {
 		printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
-			bnx2i_conn->hba->netdev->name);
+			hba->netdev->name);
 		goto out;
 	}
 	while (1) {
@@ -1985,9 +1997,9 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
 		if (nopin->op_code == ISCSI_OP_NOOP_IN &&
 		    nopin->itt == (u16) RESERVED_ITT) {
 			printk(KERN_ALERT "bnx2i: Unsolicited "
 			       "NOP-In detected for suspended "
 			       "connection dev=%s!\n",
-			       bnx2i_conn->hba->netdev->name);
+			       hba->netdev->name);
 			bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
 			goto cqe_out;
 		}
@@ -2001,7 +2013,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
 			/* Run the kthread engine only for data cmds
 			   All other cmds will be completed in this bh! */
 			bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
-			break;
+			goto done;
 		case ISCSI_OP_LOGIN_RSP:
 			bnx2i_process_login_resp(session, bnx2i_conn,
 						 qp->cq_cons_qe);
@@ -2044,11 +2056,15 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
 			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
 			       nopin->op_code);
 		}
+
+		ADD_STATS_64(hba, rx_pdus, 1);
+		ADD_STATS_64(hba, rx_bytes, nopin->data_length);
+done:
 		if (!tgt_async_msg) {
 			if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
 				printk(KERN_ALERT "bnx2i (%s): no active cmd! "
 				       "op 0x%x\n",
-				       bnx2i_conn->hba->netdev->name,
+				       hba->netdev->name,
 				       nopin->op_code);
 			else
 				atomic_dec(&bnx2i_conn->ep->num_active_cmds);
@@ -2692,6 +2708,7 @@ struct cnic_ulp_ops bnx2i_cnic_cb = {
 	.cm_remote_close = bnx2i_cm_remote_close,
 	.cm_remote_abort = bnx2i_cm_remote_abort,
 	.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
+	.cnic_get_stats = bnx2i_get_stats,
 	.owner = THIS_MODULE
 };
 
@@ -2724,7 +2741,6 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
 		goto arm_cq;
 	}
 
-	reg_base = ep->hba->netdev->base_addr;
 	if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
 	    (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
 		config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
@@ -2740,7 +2756,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
 		/* 5709 device in normal node and 5706/5708 devices */
 		reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
 
-	ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+	ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
 					  MB_KERNEL_CTX_SIZE);
 	if (!ep->qp.ctx_base)
 		return -ENOMEM;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 8b6816706ee5..b17637aab9a7 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -381,6 +381,46 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
 
 
 /**
+ * bnx2i_get_stats - Retrieve various statistics from iSCSI offload
+ * @handle: bnx2i_hba
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * retrieve various iSCSI offload related statistics.
+ */
+int bnx2i_get_stats(void *handle)
+{
+	struct bnx2i_hba *hba = handle;
+	struct iscsi_stats_info *stats;
+
+	if (!hba)
+		return -EINVAL;
+
+	stats = (struct iscsi_stats_info *)hba->cnic->stats_addr;
+
+	if (!stats)
+		return -ENOMEM;
+
+	strlcpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version));
+	memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN);
+
+	stats->max_frame_size = hba->netdev->mtu;
+	stats->txq_size = hba->max_sqes;
+	stats->rxq_size = hba->max_cqes;
+
+	stats->txq_avg_depth = 0;
+	stats->rxq_avg_depth = 0;
+
+	GET_STATS_64(hba, stats, rx_pdus);
+	GET_STATS_64(hba, stats, rx_bytes);
+
+	GET_STATS_64(hba, stats, tx_pdus);
+	GET_STATS_64(hba, stats, tx_bytes);
+
+	return 0;
+}
+
+
+/**
  * bnx2i_percpu_thread_create - Create a receive thread for an
  * online CPU
  *
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f8d516b53161..3b34c13e2f02 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -811,13 +811,13 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
 	bnx2i_identify_device(hba);
 	bnx2i_setup_host_queue_size(hba, shost);
 
+	hba->reg_base = pci_resource_start(hba->pcidev, 0);
 	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
-		hba->regview = ioremap_nocache(hba->netdev->base_addr,
-					       BNX2_MQ_CONFIG2);
+		hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
 		if (!hba->regview)
 			goto ioreg_map_err;
 	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
-		hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+		hba->regview = pci_iomap(hba->pcidev, 0, 4096);
 		if (!hba->regview)
 			goto ioreg_map_err;
 	}
@@ -874,6 +874,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
 		hba->conn_ctx_destroy_tmo = 2 * HZ;
 	}
 
+#ifdef CONFIG_32BIT
+	spin_lock_init(&hba->stat_lock);
+#endif
+	memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
+
 	if (iscsi_host_add(shost, &hba->pcidev->dev))
 		goto free_dump_mem;
 	return hba;
@@ -884,7 +889,7 @@ cid_que_err:
 	bnx2i_free_mp_bdt(hba);
 mp_bdt_mem_err:
 	if (hba->regview) {
-		iounmap(hba->regview);
+		pci_iounmap(hba->pcidev, hba->regview);
 		hba->regview = NULL;
 	}
 ioreg_map_err:
@@ -910,7 +915,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
 	pci_dev_put(hba->pcidev);
 
 	if (hba->regview) {
-		iounmap(hba->regview);
+		pci_iounmap(hba->pcidev, hba->regview);
 		hba->regview = NULL;
 	}
 	bnx2i_free_mp_bdt(hba);
@@ -1181,12 +1186,18 @@ static int
 bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
 {
 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_hba *hba = bnx2i_conn->hba;
 	struct bnx2i_cmd *cmd = task->dd_data;
 
 	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
 
 	bnx2i_setup_cmd_wqe_template(cmd);
 	bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+
+	/* Tx PDU/data length count */
+	ADD_STATS_64(hba, tx_pdus, 1);
+	ADD_STATS_64(hba, tx_bytes, task->data_count);
+
 	if (task->data_count) {
 		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
 		       task->data_count);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 36739da8bc15..49692a1ac44a 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,8 @@ static int init_act_open(struct cxgbi_sock *csk)
 	csk->saddr.sin_addr.s_addr = chba->ipv4addr;
 
 	csk->rss_qid = 0;
-	csk->l2t = t3_l2t_get(t3dev, dst, ndev);
+	csk->l2t = t3_l2t_get(t3dev, dst, ndev,
+			      &csk->daddr.sin_addr.s_addr);
 	if (!csk->l2t) {
 		pr_err("NO l2t available.\n");
 		return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a4a3bfc60cf..f924b3c3720e 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -438,8 +438,8 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
 	if (submode)
 		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
 				FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
-	req->tunnel_to_proxy = htonl(wr_ulp_mode) |
-		FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1);
+	req->tunnel_to_proxy = htonl(wr_ulp_mode |
+		FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
 	req->plen = htonl(len);
 	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
 		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
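The parenthesis move above fixes a byte-order bug; a standalone sketch (function name is illustrative): OR-ing a host-order flag into an already-swapped word corrupts the field on little-endian machines, so the whole value must be assembled before the single byte swap.

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Equivalent of the fixed line: build in host order, swap once. */
	static __be32 pack_wr_flags(u32 ulp_mode, u32 shove)
	{
		return cpu_to_be32(ulp_mode | shove);	/* not: cpu_to_be32(ulp_mode) | shove */
	}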
@@ -1142,7 +1142,7 @@ static int init_act_open(struct cxgbi_sock *csk)
 	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
 	cxgbi_sock_get(csk);
 
-	n = dst_get_neighbour_noref(csk->dst);
+	n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
 	if (!n) {
 		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
 		goto rel_resource;
@@ -1182,9 +1182,12 @@ static int init_act_open(struct cxgbi_sock *csk)
 
 	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
 	send_act_open_req(csk, skb, csk->l2t);
+	neigh_release(n);
 	return 0;
 
 rel_resource:
+	if (n)
+		neigh_release(n);
 	if (skb)
 		__kfree_skb(skb);
 	return -EINVAL;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d9253db1d0e2..b44c1cff3114 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -494,7 +494,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
 		goto err_out;
 	}
 	dst = &rt->dst;
-	n = dst_get_neighbour_noref(dst);
+	n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
 	if (!n) {
 		err = -ENODEV;
 		goto rel_rt;
@@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
 			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
 			ndev->name);
 		err = -ENETUNREACH;
-		goto rel_rt;
+		goto rel_neigh;
 	}
 
 	if (ndev->flags & IFF_LOOPBACK) {
@@ -521,7 +521,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
 		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
 			&daddr->sin_addr.s_addr, ndev->name);
 		err = -ENETUNREACH;
-		goto rel_rt;
+		goto rel_neigh;
 	}
 	log_debug(1 << CXGBI_DBG_SOCK,
 		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
@@ -531,7 +531,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
 	csk = cxgbi_sock_create(cdev);
 	if (!csk) {
 		err = -ENOMEM;
-		goto rel_rt;
+		goto rel_neigh;
 	}
 	csk->cdev = cdev;
 	csk->port_id = port;
@@ -541,9 +541,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
 	csk->daddr.sin_port = daddr->sin_port;
 	csk->daddr.sin_family = daddr->sin_family;
 	csk->saddr.sin_addr.s_addr = fl4.saddr;
+	neigh_release(n);
 
 	return csk;
 
+rel_neigh:
+	neigh_release(n);
+
 rel_rt:
 	ip_rt_put(rt);
 	if (csk)
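A sketch of the refcounting rule behind all the cxgbi changes above: unlike the removed dst_get_neighbour_noref(), dst_neigh_lookup() returns its neighbour with a reference held, so every exit path, success included, must neigh_release() it — hence the new rel_neigh label ahead of rel_rt. The check below is a placeholder for the real reachability tests.

	#include <net/dst.h>
	#include <net/neighbour.h>

	static int use_route_neigh(struct dst_entry *dst, __be32 *daddr)
	{
		struct neighbour *n = dst_neigh_lookup(dst, daddr);
		int err;

		if (!n)
			return -ENODEV;
		if (!n->dev) {			/* placeholder check */
			err = -ENETUNREACH;
			goto rel_neigh;
		}
		neigh_release(n);		/* success path drops the ref too */
		return 0;

	rel_neigh:
		neigh_release(n);
		return err;
	}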
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 48e46f5b77cc..33e422e75835 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -468,7 +468,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
 
 /*
  * scsi_dh_attach - Attach device handler
- * @sdev - sdev the handler should be attached to
+ * @q - Request queue that is associated with the scsi_device
+ *	the handler should be attached to
  * @name - name of the handler to attach
  */
 int scsi_dh_attach(struct request_queue *q, const char *name)
@@ -498,7 +499,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_attach);
 
 /*
  * scsi_dh_detach - Detach device handler
- * @sdev - sdev the handler should be detached from
+ * @q - Request queue that is associated with the scsi_device
+ *	the handler should be detached from
  *
  * This function will detach the device handler only
  * if the sdev is not part of the internal list, ie
@@ -527,6 +529,38 @@ void scsi_dh_detach(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(scsi_dh_detach);
 
+/*
+ * scsi_dh_attached_handler_name - Get attached device handler's name
+ * @q - Request queue that is associated with the scsi_device
+ *      that may have a device handler attached
+ * @gfp - the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Returns name of attached handler, NULL if no handler is attached.
+ * Caller must take care to free the returned string.
+ */
+const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
+{
+	unsigned long flags;
+	struct scsi_device *sdev;
+	const char *handler_name = NULL;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	sdev = q->queuedata;
+	if (!sdev || !get_device(&sdev->sdev_gendev))
+		sdev = NULL;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	if (!sdev)
+		return NULL;
+
+	if (sdev->scsi_dh_data)
+		handler_name = kstrdup(sdev->scsi_dh_data->scsi_dh->name, gfp);
+
+	put_device(&sdev->sdev_gendev);
+	return handler_name;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
+
 static struct notifier_block scsi_dh_nb = {
 	.notifier_call = scsi_dh_notifier
 };
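A hypothetical caller of the new export, to make the ownership rule in the kernel-doc concrete: the name comes back kstrdup()'d, so the caller must kfree() it.

	#include <linux/printk.h>
	#include <linux/slab.h>
	#include <scsi/scsi_dh.h>

	static void print_dh_name(struct request_queue *q)
	{
		const char *name = scsi_dh_attached_handler_name(q, GFP_KERNEL);

		if (name) {
			pr_info("attached device handler: %s\n", name);
			kfree(name);		/* caller owns the copy */
		}
	}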
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fda9cdea0e60..08d80a6d272a 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -46,13 +46,16 @@
 #define TPGS_SUPPORT_OFFLINE		0x40
 #define TPGS_SUPPORT_TRANSITION	0x80
 
+#define RTPG_FMT_MASK			0x70
+#define RTPG_FMT_EXT_HDR		0x10
+
 #define TPGS_MODE_UNINITIALIZED	 -1
 #define TPGS_MODE_NONE			0x0
 #define TPGS_MODE_IMPLICIT		0x1
 #define TPGS_MODE_EXPLICIT		0x2
 
 #define ALUA_INQUIRY_SIZE		36
-#define ALUA_FAILOVER_TIMEOUT		(60 * HZ)
+#define ALUA_FAILOVER_TIMEOUT		60
 #define ALUA_FAILOVER_RETRIES		5
 
 /* flags passed from user level */
@@ -68,6 +71,7 @@ struct alua_dh_data {
 	unsigned char		inq[ALUA_INQUIRY_SIZE];
 	unsigned char		*buff;
 	int			bufflen;
+	unsigned char		transition_tmo;
 	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
 	int			senselen;
 	struct scsi_device	*sdev;
@@ -128,7 +132,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,
 	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
 			 REQ_FAILFAST_DRIVER;
 	rq->retries = ALUA_FAILOVER_RETRIES;
-	rq->timeout = ALUA_FAILOVER_TIMEOUT;
+	rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ;
 
 	return rq;
 }
@@ -174,7 +178,8 @@ done:
  * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
  * @sdev: sdev the command should be sent to
  */
-static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h,
+			    bool rtpg_ext_hdr_req)
 {
 	struct request *rq;
 	int err = SCSI_DH_RES_TEMP_UNAVAIL;
@@ -185,7 +190,10 @@ static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 
 	/* Prepare the command. */
 	rq->cmd[0] = MAINTENANCE_IN;
-	rq->cmd[1] = MI_REPORT_TARGET_PGS;
+	if (rtpg_ext_hdr_req)
+		rq->cmd[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
+	else
+		rq->cmd[1] = MI_REPORT_TARGET_PGS;
 	rq->cmd[6] = (h->bufflen >> 24) & 0xff;
 	rq->cmd[7] = (h->bufflen >> 16) & 0xff;
 	rq->cmd[8] = (h->bufflen >> 8) & 0xff;
@@ -518,11 +526,18 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 	int len, k, off, valid_states = 0;
 	unsigned char *ucp;
 	unsigned err;
-	unsigned long expiry, interval = 1000;
+	bool rtpg_ext_hdr_req = 1;
+	unsigned long expiry, interval = 0;
+	unsigned int tpg_desc_tbl_off;
+	unsigned char orig_transition_tmo;
+
+	if (!h->transition_tmo)
+		expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ);
+	else
+		expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ);
 
-	expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
  retry:
-	err = submit_rtpg(sdev, h);
+	err = submit_rtpg(sdev, h, rtpg_ext_hdr_req);
 
 	if (err == SCSI_DH_IO && h->senselen > 0) {
 		err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
@@ -530,6 +545,21 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 		if (!err)
 			return SCSI_DH_IO;
 
+		/*
+		 * submit_rtpg() has failed on existing arrays
+		 * when requesting extended header info, and
+		 * the array doesn't support extended headers,
+		 * even though it shouldn't according to T10.
+		 * The retry without rtpg_ext_hdr_req set
+		 * handles this.
+		 */
+		if (rtpg_ext_hdr_req == 1 &&
+		    sense_hdr.sense_key == ILLEGAL_REQUEST &&
+		    sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
+			rtpg_ext_hdr_req = 0;
+			goto retry;
+		}
+
 		err = alua_check_sense(sdev, &sense_hdr);
 		if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry))
 			goto retry;
@@ -556,7 +586,28 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 		goto retry;
 	}
 
-	for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
+	orig_transition_tmo = h->transition_tmo;
+	if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0)
+		h->transition_tmo = h->buff[5];
+	else
+		h->transition_tmo = ALUA_FAILOVER_TIMEOUT;
+
+	if (orig_transition_tmo != h->transition_tmo) {
+		sdev_printk(KERN_INFO, sdev,
+			    "%s: transition timeout set to %d seconds\n",
+			    ALUA_DH_NAME, h->transition_tmo);
+		expiry = jiffies + h->transition_tmo * HZ;
+	}
+
+	if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
+		tpg_desc_tbl_off = 8;
+	else
+		tpg_desc_tbl_off = 4;
+
+	for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off;
+	     k < len;
+	     k += off, ucp += off) {
+
 		if (h->group_id == (ucp[2] << 8) + ucp[3]) {
 			h->state = ucp[0] & 0x0f;
 			h->pref = ucp[0] >> 7;
@@ -581,7 +632,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 		case TPGS_STATE_TRANSITIONING:
 			if (time_before(jiffies, expiry)) {
 				/* State transition, retry */
-				interval *= 2;
+				interval += 2000;
 				msleep(interval);
 				goto retry;
 			}
@@ -691,9 +742,9 @@ static int alua_activate(struct scsi_device *sdev,
 		stpg = 0;
 		break;
 	case TPGS_STATE_STANDBY:
+	case TPGS_STATE_UNAVAILABLE:
 		stpg = 1;
 		break;
-	case TPGS_STATE_UNAVAILABLE:
 	case TPGS_STATE_OFFLINE:
 		err = SCSI_DH_IO;
 		break;
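Pulling the new response-format handling together, as a sketch using the RTPG_FMT_* masks defined above: bits 4-6 of byte 4 of the RTPG response identify the format; with the extended header, byte 5 carries the implied transition timeout in seconds and the target port group descriptors start at offset 8 rather than 4.

	static unsigned int rtpg_desc_offset(const unsigned char *buff,
					     unsigned char *transition_tmo)
	{
		if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR) {
			if (buff[5])
				*transition_tmo = buff[5];	/* seconds */
			return 8;	/* descriptors follow the extended header */
		}
		return 4;		/* classic 4-byte header */
	}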
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index fe30b1b65e1d..078d262ac7cc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1529,7 +1529,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 
 	return 0;
 err:
-	per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
+	per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
 	put_cpu();
 err2:
 	kfree_skb(skb);
@@ -1569,7 +1569,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	struct ethhdr *eh;
 	struct fcoe_crc_eof *cp;
 	struct sk_buff *skb;
-	struct fcoe_dev_stats *stats;
+	struct fc_stats *stats;
 	struct fc_frame_header *fh;
 	unsigned int hlen;		/* header length implies the version */
 	unsigned int tlen;		/* trailer length */
@@ -1680,7 +1680,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 		skb_shinfo(skb)->gso_size = 0;
 	}
 	/* update tx stats: regardless if LLD fails */
-	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu());
 	stats->TxFrames++;
 	stats->TxWords += wlen;
 	put_cpu();
@@ -1714,7 +1714,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
 	struct fcoe_interface *fcoe;
 	struct fc_frame_header *fh;
 	struct sk_buff *skb = (struct sk_buff *)fp;
-	struct fcoe_dev_stats *stats;
+	struct fc_stats *stats;
 
 	/*
 	 * We only check CRC if no offload is available and if it is
@@ -1745,7 +1745,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
 		return 0;
 	}
 
-	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu());
 	stats->InvalidCRCCount++;
 	if (stats->InvalidCRCCount < 5)
 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
@@ -1762,7 +1762,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 	u32 fr_len;
 	struct fc_lport *lport;
 	struct fcoe_rcv_info *fr;
-	struct fcoe_dev_stats *stats;
+	struct fc_stats *stats;
 	struct fcoe_crc_eof crc_eof;
 	struct fc_frame *fp;
 	struct fcoe_port *port;
@@ -1793,7 +1793,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 	 */
 	hp = (struct fcoe_hdr *) skb_network_header(skb);
 
-	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu());
 	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
 		if (stats->ErrorFrames < 5)
 			printk(KERN_WARNING "fcoe: FCoE version "
@@ -1851,23 +1851,25 @@ static int fcoe_percpu_receive_thread(void *arg)
 
 	set_user_nice(current, -20);
 
+retry:
 	while (!kthread_should_stop()) {
 
 		spin_lock_bh(&p->fcoe_rx_list.lock);
 		skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
-
-		while ((skb = __skb_dequeue(&tmp)) != NULL)
-			fcoe_recv_frame(skb);
 
-		spin_lock_bh(&p->fcoe_rx_list.lock);
-		if (!skb_queue_len(&p->fcoe_rx_list)) {
+		if (!skb_queue_len(&tmp)) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_bh(&p->fcoe_rx_list.lock);
 			schedule();
 			set_current_state(TASK_RUNNING);
-		} else
-			spin_unlock_bh(&p->fcoe_rx_list.lock);
+			goto retry;
+		}
+
+		spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+		while ((skb = __skb_dequeue(&tmp)) != NULL)
+			fcoe_recv_frame(skb);
+
 	}
 	return 0;
 }
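The reworked loop above closes a wakeup race; the shape of the pattern, generalized (the frame handler is a stand-in): decide whether to sleep while still holding the list lock, so a frame queued between the emptiness check and schedule() cannot be missed, and only drain the spliced batch after the lock is dropped.

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/skbuff.h>

	static int rx_thread(void *arg)
	{
		struct sk_buff_head *list = arg, tmp;
		struct sk_buff *skb;

		__skb_queue_head_init(&tmp);
		while (!kthread_should_stop()) {
			spin_lock_bh(&list->lock);
			skb_queue_splice_init(list, &tmp);
			if (!skb_queue_len(&tmp)) {
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_bh(&list->lock);	/* sleep decision made under the lock */
				schedule();
				set_current_state(TASK_RUNNING);
				continue;
			}
			spin_unlock_bh(&list->lock);
			while ((skb = __skb_dequeue(&tmp)) != NULL)
				kfree_skb(skb);	/* stand-in for fcoe_recv_frame() */
		}
		return 0;
	}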
@@ -1970,7 +1972,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fcoe_port *port;
-	struct fcoe_dev_stats *stats;
+	struct fc_stats *stats;
 	u32 link_possible = 1;
 	u32 mfs;
 	int rc = NOTIFY_OK;
@@ -2024,7 +2026,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 	if (link_possible && !fcoe_link_ok(lport))
 		fcoe_ctlr_link_up(ctlr);
 	else if (fcoe_ctlr_link_down(ctlr)) {
-		stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+		stats = per_cpu_ptr(lport->stats, get_cpu());
 		stats->LinkFailureCount++;
 		put_cpu();
 		fcoe_clean_pending_queue(lport);
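The dev_stats -> stats rename throughout this file keeps the same per-CPU scheme used across fcoe/libfc; a reduced sketch with a single counter: writers pin their CPU and bump a local counter lock-free, readers sum across all possible CPUs.

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>

	struct demo_stats { u64 ErrorFrames; };
	static struct demo_stats __percpu *demo;

	static int demo_init(void)
	{
		demo = alloc_percpu(struct demo_stats);
		return demo ? 0 : -ENOMEM;
	}

	static void count_error(void)
	{
		per_cpu_ptr(demo, get_cpu())->ErrorFrames++;	/* lock-free, CPU pinned */
		put_cpu();
	}

	static u64 sum_errors(void)
	{
		u64 total = 0;
		unsigned int cpu;

		for_each_possible_cpu(cpu)
			total += per_cpu_ptr(demo, cpu)->ErrorFrames;
		return total;
	}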
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index d68d57241ee6..2ebe03a4b51d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -788,11 +788,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 	unsigned long deadline;
 	unsigned long sel_time = 0;
 	struct list_head del_list;
-	struct fcoe_dev_stats *stats;
+	struct fc_stats *stats;
 
 	INIT_LIST_HEAD(&del_list);
 
-	stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
+	stats = per_cpu_ptr(fip->lp->stats, get_cpu());
 
 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
 		deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
@@ -1104,8 +1104,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	struct fc_frame_header *fh = NULL;
 	struct fip_desc *desc;
 	struct fip_encaps *els;
-	struct fcoe_dev_stats *stats;
 	struct fcoe_fcf *sel;
+	struct fc_stats *stats;
 	enum fip_desc_type els_dtype = 0;
 	u8 els_op;
 	u8 sub;
@@ -1249,7 +1249,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	fr_dev(fp) = lport;
 	fr_encaps(fp) = els_dtype;
 
-	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu());
 	stats->RxFrames++;
 	stats->RxWords += skb->len / FIP_BPW;
 	put_cpu();
@@ -1353,7 +1353,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 			  ntoh24(vp->fd_fc_id));
 	if (vn_port && (vn_port == lport)) {
 		mutex_lock(&fip->ctlr_mutex);
-		per_cpu_ptr(lport->dev_stats,
+		per_cpu_ptr(lport->stats,
 			    get_cpu())->VLinkFailureCount++;
 		put_cpu();
 		fcoe_ctlr_reset(fip);
@@ -1383,8 +1383,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 	 * followed by physical port
 	 */
 	mutex_lock(&fip->ctlr_mutex);
-	per_cpu_ptr(lport->dev_stats,
-		    get_cpu())->VLinkFailureCount++;
+	per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++;
 	put_cpu();
 	fcoe_ctlr_reset(fip);
 	mutex_unlock(&fip->ctlr_mutex);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 2bc163198d33..5e751689a089 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -102,7 +102,7 @@ static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
 	int ret;
 
 	ret = kstrtoul(buf, 0, val);
-	if (ret || *val < 0)
+	if (ret)
 		return -EINVAL;
 	/*
 	 * Check for overflow; dev_loss_tmo is u32
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index b46f43dced78..ac76d8a042d7 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -89,7 +89,7 @@ void __fcoe_get_lesb(struct fc_lport *lport,
 {
 	unsigned int cpu;
 	u32 lfc, vlfc, mdac;
-	struct fcoe_dev_stats *devst;
+	struct fc_stats *stats;
 	struct fcoe_fc_els_lesb *lesb;
 	struct rtnl_link_stats64 temp;
 
@@ -99,10 +99,10 @@ void __fcoe_get_lesb(struct fc_lport *lport,
 	lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
 	memset(lesb, 0, sizeof(*lesb));
 	for_each_possible_cpu(cpu) {
-		devst = per_cpu_ptr(lport->dev_stats, cpu);
-		lfc += devst->LinkFailureCount;
-		vlfc += devst->VLinkFailureCount;
-		mdac += devst->MissDiscAdvCount;
+		stats = per_cpu_ptr(lport->stats, cpu);
+		lfc += stats->LinkFailureCount;
+		vlfc += stats->VLinkFailureCount;
+		mdac += stats->MissDiscAdvCount;
 	}
 	lesb->lesb_link_fail = htonl(lfc);
 	lesb->lesb_vlink_fail = htonl(vlfc);
@@ -502,7 +502,7 @@ static int __init fcoe_transport_init(void)
 	return 0;
 }
 
-static int __exit fcoe_transport_exit(void)
+static int fcoe_transport_exit(void)
 {
 	struct fcoe_transport *ft;
 
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index a3a056a9db67..593085a52275 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -42,7 +42,7 @@
 #include "scsi_logging.h"
 
 
-static atomic_t scsi_host_next_hn;	/* host_no for next new host */
+static atomic_t scsi_host_next_hn = ATOMIC_INIT(0);	/* host_no for next new host */
 
 
 static void scsi_host_cls_release(struct device *dev)
@@ -290,6 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
 	struct Scsi_Host *shost = dev_to_shost(dev);
 	struct device *parent = dev->parent;
 	struct request_queue *q;
+	void *queuedata;
 
 	scsi_proc_hostdir_rm(shost->hostt);
 
@@ -299,9 +300,9 @@ static void scsi_host_dev_release(struct device *dev)
 		destroy_workqueue(shost->work_q);
 	q = shost->uspace_req_q;
 	if (q) {
-		kfree(q->queuedata);
-		q->queuedata = NULL;
-		scsi_free_queue(q);
+		queuedata = q->queuedata;
+		blk_cleanup_queue(q);
+		kfree(queuedata);
 	}
 
 	scsi_destroy_command_freelist(shost);
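The reordering above is the whole fix; a sketch of the rule it appears to encode (an assumption from the shape of the change, not spelled out in the diff): queue teardown may still run code that dereferences queuedata, so the buffer can only be freed once blk_cleanup_queue() has returned.

	#include <linux/blkdev.h>
	#include <linux/slab.h>

	static void release_uspace_queue(struct request_queue *q)
	{
		void *queuedata = q->queuedata;

		blk_cleanup_queue(q);	/* may still touch q->queuedata */
		kfree(queuedata);	/* safe only after teardown completes */
	}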
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 10b65556937b..192724ed7a32 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
 
 static char driver_name[] = "hptiop";
 static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
-static const char driver_ver[] = "v1.6 (090910)";
+static const char driver_ver[] = "v1.6 (091225)";
 
 static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
 static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
@@ -958,6 +958,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 {
 	struct Scsi_Host *host = NULL;
 	struct hptiop_hba *hba;
+	struct hptiop_adapter_ops *iop_ops;
 	struct hpt_iop_request_get_config iop_config;
 	struct hpt_iop_request_set_config set_config;
 	dma_addr_t start_phy;
@@ -978,7 +979,8 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	pci_set_master(pcidev);
 
 	/* Enable 64bit DMA if possible */
-	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
+	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) {
 		if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
 			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
 			goto disable_pci_device;
@@ -998,7 +1000,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 
 	hba = (struct hptiop_hba *)host->hostdata;
 
-	hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
+	hba->ops = iop_ops;
 	hba->pcidev = pcidev;
 	hba->host = host;
 	hba->initialized = 0;
@@ -1239,6 +1241,7 @@ static struct hptiop_adapter_ops hptiop_itl_ops = {
 	.iop_intr = iop_intr_itl,
 	.post_msg = hptiop_post_msg_itl,
 	.post_req = hptiop_post_req_itl,
+	.hw_dma_bit_mask = 64,
 };
 
 static struct hptiop_adapter_ops hptiop_mv_ops = {
@@ -1254,6 +1257,7 @@ static struct hptiop_adapter_ops hptiop_mv_ops = {
 	.iop_intr = iop_intr_mv,
 	.post_msg = hptiop_post_msg_mv,
 	.post_req = hptiop_post_req_mv,
+	.hw_dma_bit_mask = 33,
 };
 
 static struct pci_device_id hptiop_id_table[] = {
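A sketch of the probe-time fallback the hptiop change parameterizes (helper name illustrative): try the adapter family's widest mask first — 64 for the ITL boards, 33 for the Marvell-based ones, per the ops tables above — then fall back to 32-bit before failing.

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int hptiop_like_set_dma_mask(struct pci_dev *pdev, int hw_bits)
	{
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(hw_bits)) == 0)
			return 0;
		return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}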
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 0b871c0ae568..baa648d87fde 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -297,6 +297,7 @@ struct hptiop_adapter_ops {
 	int  (*iop_intr)(struct hptiop_hba *hba);
 	void (*post_msg)(struct hptiop_hba *hba, u32 msg);
 	void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
+	int  hw_dma_bit_mask;
 };
 
 #define HPT_IOCTL_RESULT_OK                 0
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 47e28b555029..92c1d86d1fc6 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -166,6 +166,9 @@ static struct scsi_host_template isci_sht = {
 	.sg_tablesize			= SG_ALL,
 	.max_sectors			= SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering			= ENABLE_CLUSTERING,
+	.eh_abort_handler		= sas_eh_abort_handler,
+	.eh_device_reset_handler	= sas_eh_device_reset_handler,
+	.eh_bus_reset_handler		= sas_eh_bus_reset_handler,
 	.target_destroy			= sas_target_destroy,
 	.ioctl				= sas_ioctl,
 	.shost_attrs			= isci_host_attrs,
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index aceffadb21c7..c772d8d27159 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -99,11 +99,6 @@ struct fc_exch_mgr {
 	u16		max_xid;
 	u16		pool_max_index;
 
-	/*
-	 * currently exchange mgr stats are updated but not used.
-	 * either stats can be expose via sysfs or remove them
-	 * all together if not used XXX
-	 */
 	struct {
 		atomic_t no_free_exch;
 		atomic_t no_free_exch_xid;
@@ -124,7 +119,7 @@ struct fc_exch_mgr {
  * for each anchor to determine if that EM should be used. The last
  * anchor in the list will always match to handle any exchanges not
  * handled by other EMs. The non-default EMs would be added to the
- * anchor list by HW that provides FCoE offloads.
+ * anchor list by HW that provides offloads.
  */
 struct fc_exch_mgr_anchor {
 	struct list_head ema_list;
@@ -339,6 +334,52 @@ static void fc_exch_release(struct fc_exch *ep)
 }
 
 /**
+ * fc_exch_timer_cancel() - cancel exch timer
+ * @ep: The exchange whose timer to be canceled
+ */
+static inline void fc_exch_timer_cancel(struct fc_exch *ep)
+{
+	if (cancel_delayed_work(&ep->timeout_work)) {
+		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
+		atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+	}
+}
+
+/**
+ * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the
+ *				the exchange lock held
+ * @ep: The exchange whose timer will start
+ * @timer_msec: The timeout period
+ *
+ * Used for upper level protocols to time out the exchange.
+ * The timer is cancelled when it fires or when the exchange completes.
+ */
+static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
+					    unsigned int timer_msec)
+{
+	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+		return;
+
+	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
+
+	if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
+			       msecs_to_jiffies(timer_msec)))
+		fc_exch_hold(ep);		/* hold for timer */
+}
+
+/**
+ * fc_exch_timer_set() - Lock the exchange and set the timer
+ * @ep: The exchange whose timer will start
+ * @timer_msec: The timeout period
+ */
+static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
+{
+	spin_lock_bh(&ep->ex_lock);
+	fc_exch_timer_set_locked(ep, timer_msec);
+	spin_unlock_bh(&ep->ex_lock);
+}
+
+/**
  * fc_exch_done_locked() - Complete an exchange with the exchange lock held
  * @ep: The exchange that is complete
  */
@@ -359,8 +400,7 @@ static int fc_exch_done_locked(struct fc_exch *ep)
 
 	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
 		ep->state |= FC_EX_DONE;
-		if (cancel_delayed_work(&ep->timeout_work))
-			atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+		fc_exch_timer_cancel(ep);
 		rc = 0;
 	}
 	return rc;
@@ -424,40 +464,6 @@ static void fc_exch_delete(struct fc_exch *ep)
 }
 
 /**
- * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the
- *				the exchange lock held
- * @ep: The exchange whose timer will start
- * @timer_msec: The timeout period
- *
- * Used for upper level protocols to time out the exchange.
- * The timer is cancelled when it fires or when the exchange completes.
- */
-static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
-					    unsigned int timer_msec)
-{
-	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
-		return;
-
-	FC_EXCH_DBG(ep, "Exchange timer armed\n");
-
-	if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
-			       msecs_to_jiffies(timer_msec)))
-		fc_exch_hold(ep);		/* hold for timer */
-}
-
-/**
- * fc_exch_timer_set() - Lock the exchange and set the timer
- * @ep: The exchange whose timer will start
- * @timer_msec: The timeout period
- */
-static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
-{
-	spin_lock_bh(&ep->ex_lock);
-	fc_exch_timer_set_locked(ep, timer_msec);
-	spin_unlock_bh(&ep->ex_lock);
-}
-
-/**
  * fc_seq_send() - Send a frame using existing sequence/exchange pair
  * @lport: The local port that the exchange will be sent on
  * @sp: The sequence to be sent
@@ -986,7 +992,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
 	/*
 	 * Update sequence_id based on incoming last
 	 * frame of sequence exchange. This is needed
-	 * for FCoE target where DDP has been used
+	 * for FC target where DDP has been used
 	 * on target where, stack is indicated only
 	 * about last frame's (payload _header) header.
 	 * Whereas "seq_id" which is part of
@@ -1549,8 +1555,10 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
 		    fc_exch_rctl_name(fh->fh_r_ctl));
 
-	if (cancel_delayed_work_sync(&ep->timeout_work))
+	if (cancel_delayed_work_sync(&ep->timeout_work)) {
+		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
 		fc_exch_release(ep);	/* release from pending timer hold */
+	}
 
 	spin_lock_bh(&ep->ex_lock);
 	switch (fh->fh_r_ctl) {
@@ -1737,8 +1745,7 @@ static void fc_exch_reset(struct fc_exch *ep)
 	spin_lock_bh(&ep->ex_lock);
 	fc_exch_abort_locked(ep, 0);
 	ep->state |= FC_EX_RST_CLEANUP;
-	if (cancel_delayed_work(&ep->timeout_work))
-		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
+	fc_exch_timer_cancel(ep);
 	resp = ep->resp;
 	ep->resp = NULL;
 	if (ep->esb_stat & ESB_ST_REC_QUAL)
@@ -2133,10 +2140,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp)
 		ep->esb_stat &= ~ESB_ST_REC_QUAL;
 		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
 	}
-	if (ep->esb_stat & ESB_ST_COMPLETE) {
-		if (cancel_delayed_work(&ep->timeout_work))
-			atomic_dec(&ep->ex_refcnt);	/* drop timer hold */
-	}
+	if (ep->esb_stat & ESB_ST_COMPLETE)
+		fc_exch_timer_cancel(ep);
 
 	spin_unlock_bh(&ep->ex_lock);
 
@@ -2156,6 +2161,31 @@ out:
 }
 
 /**
+ * fc_exch_update_stats() - update exches stats to lport
+ * @lport: The local port to update exchange manager stats
+ */
+void fc_exch_update_stats(struct fc_lport *lport)
+{
+	struct fc_host_statistics *st;
+	struct fc_exch_mgr_anchor *ema;
+	struct fc_exch_mgr *mp;
+
+	st = &lport->host_stats;
+
+	list_for_each_entry(ema, &lport->ema_list, ema_list) {
+		mp = ema->mp;
+		st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
+		st->fc_no_free_exch_xid +=
+				atomic_read(&mp->stats.no_free_exch_xid);
+		st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
+		st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
+		st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
+		st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
+	}
+}
+EXPORT_SYMBOL(fc_exch_update_stats);
+
+/**
  * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
  * @lport: The local port to add the exchange manager to
  * @mp: The exchange manager to be added to the local port
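The fc_exch_timer_cancel() helper consolidated in this file encodes one invariant worth spelling out, sketched in isolation (fragments mirroring the diff, not a complete function): a queued timeout work holds an exchange reference, so whichever side wins drops exactly that reference.

	/* arm: take a ref only if the work was actually queued */
	if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		fc_exch_hold(ep);

	/* cancel: drop the ref only if we beat the timer to it;
	 * otherwise the running timer function drops it itself */
	if (cancel_delayed_work(&ep->timeout_work))
		atomic_dec(&ep->ex_refcnt);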
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index f7357308655a..14243fa5f8e8 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -158,6 +158,9 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
 		fsp->timer.data = (unsigned long)fsp;
 		INIT_LIST_HEAD(&fsp->list);
 		spin_lock_init(&fsp->scsi_pkt_lock);
+	} else {
+		per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++;
+		put_cpu();
 	}
 	return fsp;
 }
@@ -264,6 +267,9 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
 	if (!fsp->seq_ptr)
 		return -EINVAL;
 
+	per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++;
+	put_cpu();
+
 	fsp->state |= FC_SRB_ABORT_PENDING;
 	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
 }
@@ -420,6 +426,8 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
 	if (likely(fp))
 		return fp;
 
+	per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++;
+	put_cpu();
 	/* error case */
 	fc_fcp_can_queue_ramp_down(lport);
 	return NULL;
@@ -434,7 +442,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
 	struct scsi_cmnd *sc = fsp->cmd;
 	struct fc_lport *lport = fsp->lp;
-	struct fcoe_dev_stats *stats;
+	struct fc_stats *stats;
 	struct fc_frame_header *fh;
 	size_t start_offset;
 	size_t offset;
@@ -496,7 +504,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 
 	if (~crc != le32_to_cpu(fr_crc(fp))) {
 crc_err:
-		stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+		stats = per_cpu_ptr(lport->stats, get_cpu());
 		stats->ErrorFrames++;
 		/* per cpu count, not total count, but OK for limit */
 		if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
@@ -1372,10 +1380,10 @@ static void fc_fcp_timeout(unsigned long data)
 
 	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
 
-	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
-		fc_fcp_rec(fsp);
-	else if (fsp->state & FC_SRB_RCV_STATUS)
+	if (fsp->state & FC_SRB_RCV_STATUS)
 		fc_fcp_complete_locked(fsp);
+	else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+		fc_fcp_rec(fsp);
 	else
 		fc_fcp_recovery(fsp, FC_TIMED_OUT);
 	fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
@@ -1786,7 +1794,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
 	struct fc_rport_libfc_priv *rpriv;
 	int rval;
 	int rc = 0;
-	struct fcoe_dev_stats *stats;
+	struct fc_stats *stats;
 
 	rval = fc_remote_port_chkready(rport);
 	if (rval) {
@@ -1835,7 +1843,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
 	/*
 	 * setup the data direction
 	 */
-	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu());
 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		fsp->req_flags = FC_SRB_READ;
 		stats->InputRequests++;
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
index 981329a17c48..0382ac06906e 100644
--- a/drivers/scsi/libfc/fc_frame.c
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -49,7 +49,7 @@ u32 fc_frame_crc_check(struct fc_frame *fp)
 EXPORT_SYMBOL(fc_frame_crc_check);
 
 /*
- * Allocate a frame intended to be sent via fcoe_xmit.
+ * Allocate a frame intended to be sent.
  * Get an sk_buff for the frame and set the length.
  */
 struct fc_frame *_fc_frame_alloc(size_t len)
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index c1402fb499ab..f04d15c67df3 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -299,47 +299,54 @@ EXPORT_SYMBOL(fc_get_host_speed);
  */
 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
 {
-	struct fc_host_statistics *fcoe_stats;
+	struct fc_host_statistics *fc_stats;
 	struct fc_lport *lport = shost_priv(shost);
 	struct timespec v0, v1;
 	unsigned int cpu;
 	u64 fcp_in_bytes = 0;
 	u64 fcp_out_bytes = 0;
 
-	fcoe_stats = &lport->host_stats;
-	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
+	fc_stats = &lport->host_stats;
+	memset(fc_stats, 0, sizeof(struct fc_host_statistics));
 
 	jiffies_to_timespec(jiffies, &v0);
 	jiffies_to_timespec(lport->boot_time, &v1);
-	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
+	fc_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
 
 	for_each_possible_cpu(cpu) {
-		struct fcoe_dev_stats *stats;
+		struct fc_stats *stats;
 
-		stats = per_cpu_ptr(lport->dev_stats, cpu);
+		stats = per_cpu_ptr(lport->stats, cpu);
 
-		fcoe_stats->tx_frames += stats->TxFrames;
-		fcoe_stats->tx_words += stats->TxWords;
-		fcoe_stats->rx_frames += stats->RxFrames;
-		fcoe_stats->rx_words += stats->RxWords;
-		fcoe_stats->error_frames += stats->ErrorFrames;
-		fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
-		fcoe_stats->fcp_input_requests += stats->InputRequests;
-		fcoe_stats->fcp_output_requests += stats->OutputRequests;
-		fcoe_stats->fcp_control_requests += stats->ControlRequests;
+		fc_stats->tx_frames += stats->TxFrames;
+		fc_stats->tx_words += stats->TxWords;
+		fc_stats->rx_frames += stats->RxFrames;
+		fc_stats->rx_words += stats->RxWords;
+		fc_stats->error_frames += stats->ErrorFrames;
+		fc_stats->invalid_crc_count += stats->InvalidCRCCount;
+		fc_stats->fcp_input_requests += stats->InputRequests;
+		fc_stats->fcp_output_requests += stats->OutputRequests;
+		fc_stats->fcp_control_requests += stats->ControlRequests;
 		fcp_in_bytes += stats->InputBytes;
 		fcp_out_bytes += stats->OutputBytes;
-		fcoe_stats->link_failure_count += stats->LinkFailureCount;
+		fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
+		fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
+		fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
+		fc_stats->link_failure_count += stats->LinkFailureCount;
 	}
-	fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
-	fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
-	fcoe_stats->lip_count = -1;
-	fcoe_stats->nos_count = -1;
-	fcoe_stats->loss_of_sync_count = -1;
-	fcoe_stats->loss_of_signal_count = -1;
-	fcoe_stats->prim_seq_protocol_err_count = -1;
-	fcoe_stats->dumped_frames = -1;
-	return fcoe_stats;
+	fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
+	fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
+	fc_stats->lip_count = -1;
+	fc_stats->nos_count = -1;
+	fc_stats->loss_of_sync_count = -1;
+	fc_stats->loss_of_signal_count = -1;
+	fc_stats->prim_seq_protocol_err_count = -1;
+	fc_stats->dumped_frames = -1;
+
+	/* update exches stats */
+	fc_exch_update_stats(lport);
+
+	return fc_stats;
 }
 EXPORT_SYMBOL(fc_get_host_stats);
 
@@ -973,7 +980,8 @@ drop:
 	rcu_read_unlock();
 	FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
 	fc_frame_free(fp);
-	lport->tt.exch_done(sp);
+	if (sp)
+		lport->tt.exch_done(sp);
 }
 
 /**
@@ -1590,8 +1598,9 @@ static void fc_lport_timeout(struct work_struct *work)
 	case LPORT_ST_RPA:
 	case LPORT_ST_DHBA:
 	case LPORT_ST_DPRT:
-		fc_lport_enter_ms(lport, lport->state);
-		break;
+		FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
+			     fc_lport_state(lport));
+		/* fall thru */
 	case LPORT_ST_SCR:
 		fc_lport_enter_scr(lport);
 		break;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 441d88ad99a7..a59fcdc8fd63 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -139,12 +139,12 @@ static void sas_ata_task_done(struct sas_task *task)
 	if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
 	    ((stat->stat == SAM_STAT_CHECK_CONDITION &&
 	      dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
-		ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
+		memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
 
 		if (!link->sactive) {
-			qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+			qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
 		} else {
-			link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+			link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
 			if (unlikely(link->eh_info.err_mask))
 				qc->flags |= ATA_QCFLAG_FAILED;
 		}
@@ -161,8 +161,8 @@ static void sas_ata_task_done(struct sas_task *task)
 			qc->flags |= ATA_QCFLAG_FAILED;
 		}
 
-		dev->sata_dev.tf.feature = 0x04; /* status err */
-		dev->sata_dev.tf.command = ATA_ERR;
+		dev->sata_dev.fis[3] = 0x04; /* status err */
+		dev->sata_dev.fis[2] = ATA_ERR;
 	}
 }
 
@@ -269,7 +269,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
 	struct domain_device *dev = qc->ap->private_data;
 
-	memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
+	ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
 	return true;
 }
 
@@ -523,6 +523,31 @@ static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
 		i->dft->lldd_ata_set_dmamode(dev);
 }
 
+static void sas_ata_sched_eh(struct ata_port *ap)
+{
+	struct domain_device *dev = ap->private_data;
+	struct sas_ha_struct *ha = dev->port->ha;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->lock, flags);
+	if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state))
+		ha->eh_active++;
+	ata_std_sched_eh(ap);
+	spin_unlock_irqrestore(&ha->lock, flags);
+}
+
+void sas_ata_end_eh(struct ata_port *ap)
+{
+	struct domain_device *dev = ap->private_data;
+	struct sas_ha_struct *ha = dev->port->ha;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->lock, flags);
+	if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state))
+		ha->eh_active--;
+	spin_unlock_irqrestore(&ha->lock, flags);
+}
+
 static struct ata_port_operations sas_sata_ops = {
 	.prereset		= ata_std_prereset,
 	.hardreset		= sas_ata_hard_reset,
@@ -536,6 +561,8 @@ static struct ata_port_operations sas_sata_ops = {
 	.port_start		= ata_sas_port_start,
 	.port_stop		= ata_sas_port_stop,
 	.set_dmamode		= sas_ata_set_dmamode,
+	.sched_eh		= sas_ata_sched_eh,
+	.end_eh			= sas_ata_end_eh,
 };
 
 static struct ata_port_info sata_port_info = {
@@ -591,7 +618,6 @@ void sas_ata_task_abort(struct sas_task *task)
 		spin_lock_irqsave(q->queue_lock, flags);
 		blk_abort_request(qc->scsicmd->request);
 		spin_unlock_irqrestore(q->queue_lock, flags);
-		scsi_schedule_eh(qc->scsicmd->device->host);
 		return;
 	}
 
@@ -708,10 +734,6 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
 	struct ata_port *ap = dev->sata_dev.ap;
 	struct sas_ha_struct *ha = dev->port->ha;
 
-	/* hold a reference over eh since we may be racing with final
-	 * remove once all commands are completed
-	 */
-	kref_get(&dev->kref);
 	sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
 	ata_scsi_port_error_handler(ha->core.shost, ap);
 	sas_put_device(dev);
@@ -720,7 +742,7 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
 void sas_ata_strategy_handler(struct Scsi_Host *shost)
 {
 	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
-	LIST_HEAD(async);
+	ASYNC_DOMAIN_EXCLUSIVE(async);
 	int i;
 
 	/* it's ok to defer revalidation events during ata eh, these
@@ -742,6 +764,13 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
742 list_for_each_entry(dev, &port->dev_list, dev_list_node) { 764 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
743 if (!dev_is_sata(dev)) 765 if (!dev_is_sata(dev))
744 continue; 766 continue;
767
768 /* hold a reference over eh since we may be
769 * racing with final remove once all commands
770 * are completed
771 */
772 kref_get(&dev->kref);
773
745 async_schedule_domain(async_sas_ata_eh, dev, &async); 774 async_schedule_domain(async_sas_ata_eh, dev, &async);
746 } 775 }
747 spin_unlock(&port->dev_list_lock); 776 spin_unlock(&port->dev_list_lock);
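
The sas_ata.c changes above pair the new sched_eh/end_eh callbacks so ha->eh_active counts every ATA port with error handling pending, and they move the kref_get() out of the async handler into the scheduling loop, so the reference is guaranteed to be held before async_sas_ata_eh() can race with final device removal; the bare LIST_HEAD also becomes an ASYNC_DOMAIN_EXCLUSIVE so the strategy handler only flushes its own cookies. A minimal sketch of the take-a-reference-before-scheduling pattern, with my_obj and friends as illustrative names rather than libsas symbols:

	#include <linux/async.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct my_obj {
		struct kref kref;
	};

	static void my_obj_release(struct kref *kref)
	{
		kfree(container_of(kref, struct my_obj, kref));
	}

	static void my_async_handler(void *data, async_cookie_t cookie)
	{
		struct my_obj *obj = data;

		/* slow work runs here, possibly after the submitter has
		 * already dropped its own reference */
		kref_put(&obj->kref, my_obj_release);	/* scheduling ref */
	}

	static void my_schedule(struct my_obj *obj, struct async_domain *d)
	{
		kref_get(&obj->kref);	/* pin obj until the handler runs */
		async_schedule_domain(my_async_handler, obj, d);
	}
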
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 629a0865b130..3e9dc1a84358 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -39,18 +39,13 @@ void sas_init_dev(struct domain_device *dev)
39{ 39{
40 switch (dev->dev_type) { 40 switch (dev->dev_type) {
41 case SAS_END_DEV: 41 case SAS_END_DEV:
42 INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
42 break; 43 break;
43 case EDGE_DEV: 44 case EDGE_DEV:
44 case FANOUT_DEV: 45 case FANOUT_DEV:
45 INIT_LIST_HEAD(&dev->ex_dev.children); 46 INIT_LIST_HEAD(&dev->ex_dev.children);
46 mutex_init(&dev->ex_dev.cmd_mutex); 47 mutex_init(&dev->ex_dev.cmd_mutex);
47 break; 48 break;
48 case SATA_DEV:
49 case SATA_PM:
50 case SATA_PM_PORT:
51 case SATA_PENDING:
52 INIT_LIST_HEAD(&dev->sata_dev.children);
53 break;
54 default: 49 default:
55 break; 50 break;
56 } 51 }
@@ -286,6 +281,8 @@ void sas_free_device(struct kref *kref)
286 281
287static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev) 282static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
288{ 283{
284 struct sas_ha_struct *ha = port->ha;
285
289 sas_notify_lldd_dev_gone(dev); 286 sas_notify_lldd_dev_gone(dev);
290 if (!dev->parent) 287 if (!dev->parent)
291 dev->port->port_dev = NULL; 288 dev->port->port_dev = NULL;
@@ -294,8 +291,18 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
294 291
295 spin_lock_irq(&port->dev_list_lock); 292 spin_lock_irq(&port->dev_list_lock);
296 list_del_init(&dev->dev_list_node); 293 list_del_init(&dev->dev_list_node);
294 if (dev_is_sata(dev))
295 sas_ata_end_eh(dev->sata_dev.ap);
297 spin_unlock_irq(&port->dev_list_lock); 296 spin_unlock_irq(&port->dev_list_lock);
298 297
298 spin_lock_irq(&ha->lock);
299 if (dev->dev_type == SAS_END_DEV &&
300 !list_empty(&dev->ssp_dev.eh_list_node)) {
301 list_del_init(&dev->ssp_dev.eh_list_node);
302 ha->eh_active--;
303 }
304 spin_unlock_irq(&ha->lock);
305
299 sas_put_device(dev); 306 sas_put_device(dev);
300} 307}
301 308
@@ -488,9 +495,9 @@ static void sas_chain_event(int event, unsigned long *pending,
488 if (!test_and_set_bit(event, pending)) { 495 if (!test_and_set_bit(event, pending)) {
489 unsigned long flags; 496 unsigned long flags;
490 497
491 spin_lock_irqsave(&ha->state_lock, flags); 498 spin_lock_irqsave(&ha->lock, flags);
492 sas_chain_work(ha, sw); 499 sas_chain_work(ha, sw);
493 spin_unlock_irqrestore(&ha->state_lock, flags); 500 spin_unlock_irqrestore(&ha->lock, flags);
494 } 501 }
495} 502}
496 503
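
The unregister path above leans on the list_del_init()/list_empty() idiom: eh_list_node is initialized self-linked in sas_init_dev() and only ever removed with list_del_init(), so list_empty(&dev->ssp_dev.eh_list_node) doubles as a safe "is this device queued for error handling" test. A generic sketch of the idiom, with illustrative names and locking omitted (the driver holds ha->lock around these operations):

	#include <linux/list.h>

	struct item {
		struct list_head node;	/* only ever list_del_init()'ed */
	};

	static LIST_HEAD(pending);

	static void item_init(struct item *it)
	{
		INIT_LIST_HEAD(&it->node);	/* self-linked: list_empty() is true */
	}

	static void item_queue(struct item *it)
	{
		if (list_empty(&it->node))	/* not already queued */
			list_add_tail(&it->node, &pending);
	}

	static void item_cancel(struct item *it)
	{
		if (!list_empty(&it->node))	/* queued, so dequeue... */
			list_del_init(&it->node);	/* ...and re-self-link */
	}
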
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 4e4292d210c1..789c4d8bb7a7 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -47,9 +47,9 @@ static void sas_queue_event(int event, unsigned long *pending,
47 if (!test_and_set_bit(event, pending)) { 47 if (!test_and_set_bit(event, pending)) {
48 unsigned long flags; 48 unsigned long flags;
49 49
50 spin_lock_irqsave(&ha->state_lock, flags); 50 spin_lock_irqsave(&ha->lock, flags);
51 sas_queue_work(ha, work); 51 sas_queue_work(ha, work);
52 spin_unlock_irqrestore(&ha->state_lock, flags); 52 spin_unlock_irqrestore(&ha->lock, flags);
53 } 53 }
54} 54}
55 55
@@ -61,18 +61,18 @@ void __sas_drain_work(struct sas_ha_struct *ha)
61 61
62 set_bit(SAS_HA_DRAINING, &ha->state); 62 set_bit(SAS_HA_DRAINING, &ha->state);
63 /* flush submitters */ 63 /* flush submitters */
64 spin_lock_irq(&ha->state_lock); 64 spin_lock_irq(&ha->lock);
65 spin_unlock_irq(&ha->state_lock); 65 spin_unlock_irq(&ha->lock);
66 66
67 drain_workqueue(wq); 67 drain_workqueue(wq);
68 68
69 spin_lock_irq(&ha->state_lock); 69 spin_lock_irq(&ha->lock);
70 clear_bit(SAS_HA_DRAINING, &ha->state); 70 clear_bit(SAS_HA_DRAINING, &ha->state);
71 list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { 71 list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
72 list_del_init(&sw->drain_node); 72 list_del_init(&sw->drain_node);
73 sas_queue_work(ha, sw); 73 sas_queue_work(ha, sw);
74 } 74 }
75 spin_unlock_irq(&ha->state_lock); 75 spin_unlock_irq(&ha->lock);
76} 76}
77 77
78int sas_drain_work(struct sas_ha_struct *ha) 78int sas_drain_work(struct sas_ha_struct *ha)
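
The back-to-back lock/unlock in __sas_drain_work() is deliberate, not leftover: every submitter queues work while holding what is now ha->lock, so taking and immediately releasing that lock after setting SAS_HA_DRAINING guarantees that any submitter which sampled the old state has finished queuing before drain_workqueue() runs. A condensed sketch of the empty-critical-section barrier under the same assumptions, with illustrative names:

	#include <linux/bitops.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	static DEFINE_SPINLOCK(lock);
	static unsigned long state;
	#define DRAINING 0
	static struct workqueue_struct *wq;

	static void submit_side(struct work_struct *w)
	{
		spin_lock_irq(&lock);
		if (!test_bit(DRAINING, &state))
			queue_work(wq, w);	/* libsas defers to a list otherwise */
		spin_unlock_irq(&lock);
	}

	static void drain_side(void)
	{
		set_bit(DRAINING, &state);
		/* empty critical section: any submitter that saw DRAINING
		 * clear still holds the lock; once we pass through here it
		 * has finished queuing */
		spin_lock_irq(&lock);
		spin_unlock_irq(&lock);
		drain_workqueue(wq);
	}
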
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index caa0525d2523..efc6e72f09f3 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -51,14 +51,14 @@ static void smp_task_timedout(unsigned long _task)
51 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 51 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
52 spin_unlock_irqrestore(&task->task_state_lock, flags); 52 spin_unlock_irqrestore(&task->task_state_lock, flags);
53 53
54 complete(&task->completion); 54 complete(&task->slow_task->completion);
55} 55}
56 56
57static void smp_task_done(struct sas_task *task) 57static void smp_task_done(struct sas_task *task)
58{ 58{
59 if (!del_timer(&task->timer)) 59 if (!del_timer(&task->slow_task->timer))
60 return; 60 return;
61 complete(&task->completion); 61 complete(&task->slow_task->completion);
62} 62}
63 63
64/* Give it some long enough timeout. In seconds. */ 64/* Give it some long enough timeout. In seconds. */
@@ -79,7 +79,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
79 break; 79 break;
80 } 80 }
81 81
82 task = sas_alloc_task(GFP_KERNEL); 82 task = sas_alloc_slow_task(GFP_KERNEL);
83 if (!task) { 83 if (!task) {
84 res = -ENOMEM; 84 res = -ENOMEM;
85 break; 85 break;
@@ -91,20 +91,20 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
91 91
92 task->task_done = smp_task_done; 92 task->task_done = smp_task_done;
93 93
94 task->timer.data = (unsigned long) task; 94 task->slow_task->timer.data = (unsigned long) task;
95 task->timer.function = smp_task_timedout; 95 task->slow_task->timer.function = smp_task_timedout;
96 task->timer.expires = jiffies + SMP_TIMEOUT*HZ; 96 task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
97 add_timer(&task->timer); 97 add_timer(&task->slow_task->timer);
98 98
99 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); 99 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
100 100
101 if (res) { 101 if (res) {
102 del_timer(&task->timer); 102 del_timer(&task->slow_task->timer);
103 SAS_DPRINTK("executing SMP task failed:%d\n", res); 103 SAS_DPRINTK("executing SMP task failed:%d\n", res);
104 break; 104 break;
105 } 105 }
106 106
107 wait_for_completion(&task->completion); 107 wait_for_completion(&task->slow_task->completion);
108 res = -ECOMM; 108 res = -ECOMM;
109 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 109 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
110 SAS_DPRINTK("smp task timed out or aborted\n"); 110 SAS_DPRINTK("smp task timed out or aborted\n");
@@ -868,7 +868,7 @@ static struct domain_device *sas_ex_discover_end_dev(
868} 868}
869 869
870/* See if this phy is part of a wide port */ 870/* See if this phy is part of a wide port */
871static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id) 871static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
872{ 872{
873 struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; 873 struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
874 int i; 874 int i;
@@ -884,11 +884,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
884 sas_port_add_phy(ephy->port, phy->phy); 884 sas_port_add_phy(ephy->port, phy->phy);
885 phy->port = ephy->port; 885 phy->port = ephy->port;
886 phy->phy_state = PHY_DEVICE_DISCOVERED; 886 phy->phy_state = PHY_DEVICE_DISCOVERED;
887 return 0; 887 return true;
888 } 888 }
889 } 889 }
890 890
891 return -ENODEV; 891 return false;
892} 892}
893 893
894static struct domain_device *sas_ex_discover_expander( 894static struct domain_device *sas_ex_discover_expander(
@@ -1030,8 +1030,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1030 return res; 1030 return res;
1031 } 1031 }
1032 1032
1033 res = sas_ex_join_wide_port(dev, phy_id); 1033 if (sas_ex_join_wide_port(dev, phy_id)) {
1034 if (!res) {
1035 SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", 1034 SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
1036 phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); 1035 phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
1037 return res; 1036 return res;
@@ -1077,8 +1076,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1077 if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == 1076 if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
1078 SAS_ADDR(child->sas_addr)) { 1077 SAS_ADDR(child->sas_addr)) {
1079 ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; 1078 ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
1080 res = sas_ex_join_wide_port(dev, i); 1079 if (sas_ex_join_wide_port(dev, i))
1081 if (!res)
1082 SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", 1080 SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
1083 i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); 1081 i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
1084 1082
@@ -1943,32 +1941,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
1943{ 1941{
1944 struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; 1942 struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
1945 struct domain_device *child; 1943 struct domain_device *child;
1946 bool found = false; 1944 int res;
1947 int res, i;
1948 1945
1949 SAS_DPRINTK("ex %016llx phy%d new device attached\n", 1946 SAS_DPRINTK("ex %016llx phy%d new device attached\n",
1950 SAS_ADDR(dev->sas_addr), phy_id); 1947 SAS_ADDR(dev->sas_addr), phy_id);
1951 res = sas_ex_phy_discover(dev, phy_id); 1948 res = sas_ex_phy_discover(dev, phy_id);
1952 if (res) 1949 if (res)
1953 goto out; 1950 return res;
1954 /* to support the wide port inserted */ 1951
1955 for (i = 0; i < dev->ex_dev.num_phys; i++) { 1952 if (sas_ex_join_wide_port(dev, phy_id))
1956 struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
1957 if (i == phy_id)
1958 continue;
1959 if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
1960 SAS_ADDR(ex_phy->attached_sas_addr)) {
1961 found = true;
1962 break;
1963 }
1964 }
1965 if (found) {
1966 sas_ex_join_wide_port(dev, phy_id);
1967 return 0; 1953 return 0;
1968 } 1954
1969 res = sas_ex_discover_devices(dev, phy_id); 1955 res = sas_ex_discover_devices(dev, phy_id);
1970 if (!res) 1956 if (res)
1971 goto out; 1957 return res;
1972 list_for_each_entry(child, &dev->ex_dev.children, siblings) { 1958 list_for_each_entry(child, &dev->ex_dev.children, siblings) {
1973 if (SAS_ADDR(child->sas_addr) == 1959 if (SAS_ADDR(child->sas_addr) ==
1974 SAS_ADDR(ex_phy->attached_sas_addr)) { 1960 SAS_ADDR(ex_phy->attached_sas_addr)) {
@@ -1978,7 +1964,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
1978 break; 1964 break;
1979 } 1965 }
1980 } 1966 }
1981out:
1982 return res; 1967 return res;
1983} 1968}
1984 1969
@@ -2005,6 +1990,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
2005 u8 sas_addr[8]; 1990 u8 sas_addr[8];
2006 int res; 1991 int res;
2007 1992
1993 memset(sas_addr, 0, 8);
2008 res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type); 1994 res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
2009 switch (res) { 1995 switch (res) {
2010 case SMP_RESP_NO_PHY: 1996 case SMP_RESP_NO_PHY:
@@ -2017,9 +2003,13 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
2017 return res; 2003 return res;
2018 case SMP_RESP_FUNC_ACC: 2004 case SMP_RESP_FUNC_ACC:
2019 break; 2005 break;
2006 case -ECOMM:
2007 break;
2008 default:
2009 return res;
2020 } 2010 }
2021 2011
2022 if (SAS_ADDR(sas_addr) == 0) { 2012 if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
2023 phy->phy_state = PHY_EMPTY; 2013 phy->phy_state = PHY_EMPTY;
2024 sas_unregister_devs_sas_addr(dev, phy_id, last); 2014 sas_unregister_devs_sas_addr(dev, phy_id, last);
2025 return res; 2015 return res;
@@ -2109,9 +2099,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
2109 struct domain_device *dev = NULL; 2099 struct domain_device *dev = NULL;
2110 2100
2111 res = sas_find_bcast_dev(port_dev, &dev); 2101 res = sas_find_bcast_dev(port_dev, &dev);
2112 if (res) 2102 while (res == 0 && dev) {
2113 goto out;
2114 if (dev) {
2115 struct expander_device *ex = &dev->ex_dev; 2103 struct expander_device *ex = &dev->ex_dev;
2116 int i = 0, phy_id; 2104 int i = 0, phy_id;
2117 2105
@@ -2123,8 +2111,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
2123 res = sas_rediscover(dev, phy_id); 2111 res = sas_rediscover(dev, phy_id);
2124 i = phy_id + 1; 2112 i = phy_id + 1;
2125 } while (i < ex->num_phys); 2113 } while (i < ex->num_phys);
2114
2115 dev = NULL;
2116 res = sas_find_bcast_dev(port_dev, &dev);
2126 } 2117 }
2127out:
2128 return res; 2118 return res;
2129} 2119}
2130 2120
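
smp_execute_task() above is the template consumer of the new slow-task fields: a timer completes the same completion the submitter sleeps on, so the wait ends on whichever of the done callback or the timeout fires first, and del_timer()'s return value arbitrates the race. A reduced sketch of the pattern against the 3.x timer API, with illustrative names (the real libsas code additionally checks SAS_TASK_STATE_ABORTED afterwards to tell the two outcomes apart):

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/timer.h>

	struct slow_ctx {
		struct timer_list timer;
		struct completion done;
	};

	static void ctx_timedout(unsigned long data)
	{
		struct slow_ctx *ctx = (struct slow_ctx *)data;

		/* mark the operation aborted here, then wake the waiter */
		complete(&ctx->done);
	}

	static void ctx_done(struct slow_ctx *ctx)	/* completion callback */
	{
		if (!del_timer(&ctx->timer))
			return;		/* too late: the timeout owns the wakeup */
		complete(&ctx->done);
	}

	static void submit_and_wait(struct slow_ctx *ctx, unsigned long secs)
	{
		init_timer(&ctx->timer);
		init_completion(&ctx->done);
		ctx->timer.data = (unsigned long)ctx;
		ctx->timer.function = ctx_timedout;
		ctx->timer.expires = jiffies + secs * HZ;
		add_timer(&ctx->timer);
		/* ... hand the request to the LLDD here ... */
		wait_for_completion(&ctx->done);	/* done or timed out */
	}
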
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 10cb5ae30977..014297c05880 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -48,18 +48,37 @@ struct sas_task *sas_alloc_task(gfp_t flags)
48 INIT_LIST_HEAD(&task->list); 48 INIT_LIST_HEAD(&task->list);
49 spin_lock_init(&task->task_state_lock); 49 spin_lock_init(&task->task_state_lock);
50 task->task_state_flags = SAS_TASK_STATE_PENDING; 50 task->task_state_flags = SAS_TASK_STATE_PENDING;
51 init_timer(&task->timer);
52 init_completion(&task->completion);
53 } 51 }
54 52
55 return task; 53 return task;
56} 54}
57EXPORT_SYMBOL_GPL(sas_alloc_task); 55EXPORT_SYMBOL_GPL(sas_alloc_task);
58 56
57struct sas_task *sas_alloc_slow_task(gfp_t flags)
58{
59 struct sas_task *task = sas_alloc_task(flags);
60 struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);
61
62 if (!task || !slow) {
63 if (task)
64 kmem_cache_free(sas_task_cache, task);
65 kfree(slow);
66 return NULL;
67 }
68
69 task->slow_task = slow;
70 init_timer(&slow->timer);
71 init_completion(&slow->completion);
72
73 return task;
74}
75EXPORT_SYMBOL_GPL(sas_alloc_slow_task);
76
59void sas_free_task(struct sas_task *task) 77void sas_free_task(struct sas_task *task)
60{ 78{
61 if (task) { 79 if (task) {
62 BUG_ON(!list_empty(&task->list)); 80 BUG_ON(!list_empty(&task->list));
81 kfree(task->slow_task);
63 kmem_cache_free(sas_task_cache, task); 82 kmem_cache_free(sas_task_cache, task);
64 } 83 }
65} 84}
@@ -114,9 +133,11 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
114 sas_ha->lldd_queue_size = 128; /* Sanity */ 133 sas_ha->lldd_queue_size = 128; /* Sanity */
115 134
116 set_bit(SAS_HA_REGISTERED, &sas_ha->state); 135 set_bit(SAS_HA_REGISTERED, &sas_ha->state);
117 spin_lock_init(&sas_ha->state_lock); 136 spin_lock_init(&sas_ha->lock);
118 mutex_init(&sas_ha->drain_mutex); 137 mutex_init(&sas_ha->drain_mutex);
138 init_waitqueue_head(&sas_ha->eh_wait_q);
119 INIT_LIST_HEAD(&sas_ha->defer_q); 139 INIT_LIST_HEAD(&sas_ha->defer_q);
140 INIT_LIST_HEAD(&sas_ha->eh_dev_q);
120 141
121 error = sas_register_phys(sas_ha); 142 error = sas_register_phys(sas_ha);
122 if (error) { 143 if (error) {
@@ -163,9 +184,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
163 * events to be queued, and flush any in-progress drainers 184 * events to be queued, and flush any in-progress drainers
164 */ 185 */
165 mutex_lock(&sas_ha->drain_mutex); 186 mutex_lock(&sas_ha->drain_mutex);
166 spin_lock_irq(&sas_ha->state_lock); 187 spin_lock_irq(&sas_ha->lock);
167 clear_bit(SAS_HA_REGISTERED, &sas_ha->state); 188 clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
168 spin_unlock_irq(&sas_ha->state_lock); 189 spin_unlock_irq(&sas_ha->lock);
169 __sas_drain_work(sas_ha); 190 __sas_drain_work(sas_ha);
170 mutex_unlock(&sas_ha->drain_mutex); 191 mutex_unlock(&sas_ha->drain_mutex);
171 192
@@ -411,9 +432,9 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
411 d->reset_result = 0; 432 d->reset_result = 0;
412 d->hard_reset = hard_reset; 433 d->hard_reset = hard_reset;
413 434
414 spin_lock_irq(&ha->state_lock); 435 spin_lock_irq(&ha->lock);
415 sas_queue_work(ha, &d->reset_work); 436 sas_queue_work(ha, &d->reset_work);
416 spin_unlock_irq(&ha->state_lock); 437 spin_unlock_irq(&ha->lock);
417 438
418 rc = sas_drain_work(ha); 439 rc = sas_drain_work(ha);
419 if (rc == 0) 440 if (rc == 0)
@@ -438,9 +459,9 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
438 d->enable_result = 0; 459 d->enable_result = 0;
439 d->enable = enable; 460 d->enable = enable;
440 461
441 spin_lock_irq(&ha->state_lock); 462 spin_lock_irq(&ha->lock);
442 sas_queue_work(ha, &d->enable_work); 463 sas_queue_work(ha, &d->enable_work);
443 spin_unlock_irq(&ha->state_lock); 464 spin_unlock_irq(&ha->lock);
444 465
445 rc = sas_drain_work(ha); 466 rc = sas_drain_work(ha);
446 if (rc == 0) 467 if (rc == 0)
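
sas_alloc_slow_task() splits the rarely needed timer and completion out of every I/O task: fast-path tasks stay small, and only internal slow-path users such as SMP requests pay for the extra allocation. Note the unwind when one of the two allocations fails, and that sas_free_task() frees slow_task unconditionally (kfree(NULL) is a no-op). The intended call pattern, sketched with error handling elided:

	struct sas_task *task;

	task = sas_alloc_slow_task(GFP_KERNEL);	/* task + task->slow_task */
	if (!task)
		return -ENOMEM;
	/* ... arm task->slow_task->timer, submit, then sleep on
	 * task->slow_task->completion, as smp_execute_task() does ... */
	sas_free_task(task);			/* also kfree()s slow_task */
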
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f0b9b7bf1882..6e795a174a12 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -460,14 +460,109 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev)
460} 460}
461EXPORT_SYMBOL_GPL(sas_get_local_phy); 461EXPORT_SYMBOL_GPL(sas_get_local_phy);
462 462
463static void sas_wait_eh(struct domain_device *dev)
464{
465 struct sas_ha_struct *ha = dev->port->ha;
466 DEFINE_WAIT(wait);
467
468 if (dev_is_sata(dev)) {
469 ata_port_wait_eh(dev->sata_dev.ap);
470 return;
471 }
472 retry:
473 spin_lock_irq(&ha->lock);
474
475 while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
476 prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
477 spin_unlock_irq(&ha->lock);
478 schedule();
479 spin_lock_irq(&ha->lock);
480 }
481 finish_wait(&ha->eh_wait_q, &wait);
482
483 spin_unlock_irq(&ha->lock);
484
485 /* make sure SCSI EH is complete */
486 if (scsi_host_in_recovery(ha->core.shost)) {
487 msleep(10);
488 goto retry;
489 }
490}
491EXPORT_SYMBOL(sas_wait_eh);
492
493static int sas_queue_reset(struct domain_device *dev, int reset_type, int lun, int wait)
494{
495 struct sas_ha_struct *ha = dev->port->ha;
496 int scheduled = 0, tries = 100;
497
498 /* ata: promote lun reset to bus reset */
499 if (dev_is_sata(dev)) {
500 sas_ata_schedule_reset(dev);
501 if (wait)
502 sas_ata_wait_eh(dev);
503 return SUCCESS;
504 }
505
506 while (!scheduled && tries--) {
507 spin_lock_irq(&ha->lock);
508 if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
509 !test_bit(reset_type, &dev->state)) {
510 scheduled = 1;
511 ha->eh_active++;
512 list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
513 set_bit(SAS_DEV_EH_PENDING, &dev->state);
514 set_bit(reset_type, &dev->state);
515 int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
516 scsi_schedule_eh(ha->core.shost);
517 }
518 spin_unlock_irq(&ha->lock);
519
520 if (wait)
521 sas_wait_eh(dev);
522
523 if (scheduled)
524 return SUCCESS;
525 }
526
527 SAS_DPRINTK("%s reset of %s failed\n",
528 reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
529 dev_name(&dev->rphy->dev));
530
531 return FAILED;
532}
533
534int sas_eh_abort_handler(struct scsi_cmnd *cmd)
535{
536 int res;
537 struct sas_task *task = TO_SAS_TASK(cmd);
538 struct Scsi_Host *host = cmd->device->host;
539 struct sas_internal *i = to_sas_internal(host->transportt);
540
541 if (current != host->ehandler)
542 return FAILED;
543
544 if (!i->dft->lldd_abort_task)
545 return FAILED;
546
547 res = i->dft->lldd_abort_task(task);
548 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
549 return SUCCESS;
550
551 return FAILED;
552}
553EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
554
463/* Attempt to send a LUN reset message to a device */ 555/* Attempt to send a LUN reset message to a device */
464int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) 556int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
465{ 557{
466 struct domain_device *dev = cmd_to_domain_dev(cmd);
467 struct sas_internal *i =
468 to_sas_internal(dev->port->ha->core.shost->transportt);
469 struct scsi_lun lun;
470 int res; 558 int res;
559 struct scsi_lun lun;
560 struct Scsi_Host *host = cmd->device->host;
561 struct domain_device *dev = cmd_to_domain_dev(cmd);
562 struct sas_internal *i = to_sas_internal(host->transportt);
563
564 if (current != host->ehandler)
565 return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
471 566
472 int_to_scsilun(cmd->device->lun, &lun); 567 int_to_scsilun(cmd->device->lun, &lun);
473 568
@@ -481,21 +576,22 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
481 return FAILED; 576 return FAILED;
482} 577}
483 578
484/* Attempt to send a phy (bus) reset */
485int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) 579int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
486{ 580{
487 struct domain_device *dev = cmd_to_domain_dev(cmd);
488 struct sas_phy *phy = sas_get_local_phy(dev);
489 int res; 581 int res;
582 struct Scsi_Host *host = cmd->device->host;
583 struct domain_device *dev = cmd_to_domain_dev(cmd);
584 struct sas_internal *i = to_sas_internal(host->transportt);
490 585
491 res = sas_phy_reset(phy, 1); 586 if (current != host->ehandler)
492 if (res) 587 return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
493 SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
494 kobject_name(&phy->dev.kobj),
495 res);
496 sas_put_local_phy(phy);
497 588
498 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) 589 if (!i->dft->lldd_I_T_nexus_reset)
590 return FAILED;
591
592 res = i->dft->lldd_I_T_nexus_reset(dev);
593 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
594 res == -ENODEV)
499 return SUCCESS; 595 return SUCCESS;
500 596
501 return FAILED; 597 return FAILED;
@@ -667,16 +763,53 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
667 goto out; 763 goto out;
668} 764}
669 765
766static void sas_eh_handle_resets(struct Scsi_Host *shost)
767{
768 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
769 struct sas_internal *i = to_sas_internal(shost->transportt);
770
771 /* handle directed resets to sas devices */
772 spin_lock_irq(&ha->lock);
773 while (!list_empty(&ha->eh_dev_q)) {
774 struct domain_device *dev;
775 struct ssp_device *ssp;
776
777 ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
778 list_del_init(&ssp->eh_list_node);
779 dev = container_of(ssp, typeof(*dev), ssp_dev);
780 kref_get(&dev->kref);
781 WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");
782
783 spin_unlock_irq(&ha->lock);
784
785 if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
786 i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);
787
788 if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
789 i->dft->lldd_I_T_nexus_reset(dev);
790
791 sas_put_device(dev);
792 spin_lock_irq(&ha->lock);
793 clear_bit(SAS_DEV_EH_PENDING, &dev->state);
794 ha->eh_active--;
795 }
796 spin_unlock_irq(&ha->lock);
797}
798
799
670void sas_scsi_recover_host(struct Scsi_Host *shost) 800void sas_scsi_recover_host(struct Scsi_Host *shost)
671{ 801{
672 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 802 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
673 unsigned long flags;
674 LIST_HEAD(eh_work_q); 803 LIST_HEAD(eh_work_q);
804 int tries = 0;
805 bool retry;
675 806
676 spin_lock_irqsave(shost->host_lock, flags); 807retry:
808 tries++;
809 retry = true;
810 spin_lock_irq(shost->host_lock);
677 list_splice_init(&shost->eh_cmd_q, &eh_work_q); 811 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
678 shost->host_eh_scheduled = 0; 812 spin_unlock_irq(shost->host_lock);
679 spin_unlock_irqrestore(shost->host_lock, flags);
680 813
681 SAS_DPRINTK("Enter %s busy: %d failed: %d\n", 814 SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
682 __func__, shost->host_busy, shost->host_failed); 815 __func__, shost->host_busy, shost->host_failed);
@@ -705,13 +838,26 @@ out:
705 if (ha->lldd_max_execute_num > 1) 838 if (ha->lldd_max_execute_num > 1)
706 wake_up_process(ha->core.queue_thread); 839 wake_up_process(ha->core.queue_thread);
707 840
841 sas_eh_handle_resets(shost);
842
708 /* now link into libata eh --- if we have any ata devices */ 843 /* now link into libata eh --- if we have any ata devices */
709 sas_ata_strategy_handler(shost); 844 sas_ata_strategy_handler(shost);
710 845
711 scsi_eh_flush_done_q(&ha->eh_done_q); 846 scsi_eh_flush_done_q(&ha->eh_done_q);
712 847
713 SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n", 848 /* check if any new eh work was scheduled during the last run */
714 __func__, shost->host_busy, shost->host_failed); 849 spin_lock_irq(&ha->lock);
850 if (ha->eh_active == 0) {
851 shost->host_eh_scheduled = 0;
852 retry = false;
853 }
854 spin_unlock_irq(&ha->lock);
855
856 if (retry)
857 goto retry;
858
859 SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
860 __func__, shost->host_busy, shost->host_failed, tries);
715} 861}
716 862
717enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) 863enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
@@ -988,9 +1134,13 @@ void sas_task_abort(struct sas_task *task)
988 1134
989 /* Escape for libsas internal commands */ 1135 /* Escape for libsas internal commands */
990 if (!sc) { 1136 if (!sc) {
991 if (!del_timer(&task->timer)) 1137 struct sas_task_slow *slow = task->slow_task;
1138
1139 if (!slow)
1140 return;
1141 if (!del_timer(&slow->timer))
992 return; 1142 return;
993 task->timer.function(task->timer.data); 1143 slow->timer.function(slow->timer.data);
994 return; 1144 return;
995 } 1145 }
996 1146
@@ -1003,7 +1153,6 @@ void sas_task_abort(struct sas_task *task)
1003 spin_lock_irqsave(q->queue_lock, flags); 1153 spin_lock_irqsave(q->queue_lock, flags);
1004 blk_abort_request(sc->request); 1154 blk_abort_request(sc->request);
1005 spin_unlock_irqrestore(q->queue_lock, flags); 1155 spin_unlock_irqrestore(q->queue_lock, flags);
1006 scsi_schedule_eh(sc->device->host);
1007 } 1156 }
1008} 1157}
1009 1158
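
sas_wait_eh() above is the classic open-coded wait loop: prepare_to_wait() while holding the lock, drop the lock to schedule(), then re-take it and re-test the condition, so a wakeup cannot be lost between the test and the sleep. The waker's side, presumably run once error handling for a device finishes, clears the bit and then signals ha->eh_wait_q. Both halves of the pairing, sketched with illustrative names:

	#include <linux/bitops.h>
	#include <linux/sched.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	static DEFINE_SPINLOCK(lock);
	static DECLARE_WAIT_QUEUE_HEAD(wq);
	static unsigned long state;
	#define PENDING 0

	static void wait_for_clear(void)
	{
		DEFINE_WAIT(wait);

		spin_lock_irq(&lock);
		while (test_bit(PENDING, &state)) {
			prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&lock);
			schedule();		/* sleep until woken */
			spin_lock_irq(&lock);
		}
		finish_wait(&wq, &wait);
		spin_unlock_irq(&lock);
	}

	static void clear_and_wake(void)
	{
		spin_lock_irq(&lock);
		clear_bit(PENDING, &state);
		wake_up(&wq);
		spin_unlock_irq(&lock);
	}
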
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index fe5d396aca73..e2516ba8ebfa 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -22,7 +22,9 @@
22ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage 22ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
23ccflags-$(GCOV) += -O0 23ccflags-$(GCOV) += -O0
24 24
25ifdef WARNINGS_BECOME_ERRORS
25ccflags-y += -Werror 26ccflags-y += -Werror
27endif
26 28
27obj-$(CONFIG_SCSI_LPFC) := lpfc.o 29obj-$(CONFIG_SCSI_LPFC) := lpfc.o
28 30
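
With -Werror now behind a guard, a developer can still opt in by defining the variable on the make command line, for example make WARNINGS_BECOME_ERRORS=1 M=drivers/scsi/lpfc, while default builds with newer, noisier compilers no longer fail outright on warnings unrelated to the driver.
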
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e5da6da20f8a..a65c05a8d488 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -96,6 +96,10 @@ struct lpfc_sli2_slim;
96/* queue dump line buffer size */ 96/* queue dump line buffer size */
97#define LPFC_LBUF_SZ 128 97#define LPFC_LBUF_SZ 128
98 98
99/* mailbox system shutdown options */
100#define LPFC_MBX_NO_WAIT 0
101#define LPFC_MBX_WAIT 1
102
99enum lpfc_polling_flags { 103enum lpfc_polling_flags {
100 ENABLE_FCP_RING_POLLING = 0x1, 104 ENABLE_FCP_RING_POLLING = 0x1,
101 DISABLE_FCP_RING_INT = 0x2 105 DISABLE_FCP_RING_INT = 0x2
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5eb2bc116183..adef5bb2100e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3617,6 +3617,91 @@ lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
3617static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR, 3617static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
3618 lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store); 3618 lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
3619 3619
3620/**
3621 * lpfc_fcp_imax_store - Set the maximum number of fast-path FCP interrupts per second
3622 *
3623 * @dev: class device that is converted into a Scsi_host.
3624 * @attr: device attribute, not used.
3625 * @buf: string with the number of fast-path FCP interrupts per second.
3626 * @count: unused variable.
3627 *
3628 * Description:
3629 * If val is in a valid range [636,651042], then set the adapter's
3630 * maximum number of fast-path FCP interrupts per second.
3631 *
3632 * Returns:
3633 * the length of buf on success, provided val is in range and the
3634 * intended mode is supported.
3635 * -EINVAL if val out of range or intended mode is not supported.
3636 **/
3637static ssize_t
3638lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
3639 const char *buf, size_t count)
3640{
3641 struct Scsi_Host *shost = class_to_shost(dev);
3642 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
3643 struct lpfc_hba *phba = vport->phba;
3644 int val = 0, i;
3645
3646 /* Sanity check on user data */
3647 if (!isdigit(buf[0]))
3648 return -EINVAL;
3649 if (sscanf(buf, "%i", &val) != 1)
3650 return -EINVAL;
3651
3652 /* Value range is [636,651042] */
3653 if (val < LPFC_MIM_IMAX || val > LPFC_DMULT_CONST)
3654 return -EINVAL;
3655
3656 phba->cfg_fcp_imax = (uint32_t)val;
3657 for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY)
3658 lpfc_modify_fcp_eq_delay(phba, i);
3659
3660 return strlen(buf);
3661}
3662
3663/*
3664# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
3665#
3666# Value range is [636,651042]. Default value is 10000.
3667*/
3668static int lpfc_fcp_imax = LPFC_FP_DEF_IMAX;
3669module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
3670MODULE_PARM_DESC(lpfc_fcp_imax,
3671 "Set the maximum number of fast-path FCP interrupts per second");
3672lpfc_param_show(fcp_imax)
3673
3674/**
3675 * lpfc_fcp_imax_init - Set the initial fast-path FCP interrupt rate
3676 * @phba: lpfc_hba pointer.
3677 * @val: maximum number of fast-path FCP interrupts per second.
3678 *
3679 * Description:
3680 * If val is in a valid range [636,651042], then initialize the adapter's
3681 * maximum number of fast-path FCP interrupts per second.
3682 *
3683 * Returns:
3684 * zero if val is saved; out-of-range values fall back to the
3685 * default and also return zero.
3686 **/
3687static int
3688lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
3689{
3690 if (val >= LPFC_MIM_IMAX && val <= LPFC_DMULT_CONST) {
3691 phba->cfg_fcp_imax = val;
3692 return 0;
3693 }
3694
3695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3696 "3016 fcp_imax: %d out of range, using default\n", val);
3697 phba->cfg_fcp_imax = LPFC_FP_DEF_IMAX;
3698
3699 return 0;
3700}
3701
3702static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
3703 lpfc_fcp_imax_show, lpfc_fcp_imax_store);
3704
3620/* 3705/*
3621# lpfc_fcp_class: Determines FC class to use for the FCP protocol. 3706# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
3622# Value range is [2,3]. Default value is 3. 3707# Value range is [2,3]. Default value is 3.
@@ -3758,14 +3843,6 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
3758 "MSI-X (2), if possible"); 3843 "MSI-X (2), if possible");
3759 3844
3760/* 3845/*
3761# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
3762#
3763# Value range is [636,651042]. Default value is 10000.
3764*/
3765LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
3766 "Set the maximum number of fast-path FCP interrupts per second");
3767
3768/*
3769# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues 3846# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
3770# 3847#
3771# Value range is [1,31]. Default value is 4. 3848# Value range is [1,31]. Default value is 4.
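
The net effect of this hunk is that lpfc_fcp_imax is promoted from an init-time-only LPFC_ATTR_R into a runtime-writable attribute: the custom store handler validates the input range and immediately reprograms the EQ delay multipliers, batching LPFC_MAX_EQ_DELAY queues per mailbox command. Assuming the usual scsi_host sysfs layout, the interrupt rate of a live port can then be retuned with something like echo 10000 > /sys/class/scsi_host/host0/lpfc_fcp_imax.
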
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 9b2a16f3bc79..8a2a514a2553 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -183,7 +183,7 @@ int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
183void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int); 183void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
184int lpfc_online(struct lpfc_hba *); 184int lpfc_online(struct lpfc_hba *);
185void lpfc_unblock_mgmt_io(struct lpfc_hba *); 185void lpfc_unblock_mgmt_io(struct lpfc_hba *);
186void lpfc_offline_prep(struct lpfc_hba *); 186void lpfc_offline_prep(struct lpfc_hba *, int);
187void lpfc_offline(struct lpfc_hba *); 187void lpfc_offline(struct lpfc_hba *);
188void lpfc_reset_hba(struct lpfc_hba *); 188void lpfc_reset_hba(struct lpfc_hba *);
189 189
@@ -273,7 +273,7 @@ int lpfc_sli_host_down(struct lpfc_vport *);
273int lpfc_sli_hba_down(struct lpfc_hba *); 273int lpfc_sli_hba_down(struct lpfc_hba *);
274int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 274int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
275int lpfc_sli_handle_mb_event(struct lpfc_hba *); 275int lpfc_sli_handle_mb_event(struct lpfc_hba *);
276void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); 276void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *, int);
277int lpfc_sli_check_eratt(struct lpfc_hba *); 277int lpfc_sli_check_eratt(struct lpfc_hba *);
278void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 278void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
279 struct lpfc_sli_ring *, uint32_t); 279 struct lpfc_sli_ring *, uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 616c400dae14..afe368fd1b98 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -395,8 +395,13 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
395 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) 395 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
396 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) 396 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
397 break; 397 break;
398 if (fcp_cqidx >= phba->cfg_fcp_eq_count) 398 if (phba->intr_type == MSIX) {
399 return; 399 if (fcp_cqidx >= phba->cfg_fcp_eq_count)
400 return;
401 } else {
402 if (fcp_cqidx > 0)
403 return;
404 }
400 405
401 printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n", 406 printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n",
402 fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 407 fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
@@ -426,8 +431,13 @@ lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
426 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) 431 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
427 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) 432 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
428 break; 433 break;
429 if (fcp_cqidx >= phba->cfg_fcp_eq_count) 434 if (phba->intr_type == MSIX) {
430 return; 435 if (fcp_cqidx >= phba->cfg_fcp_eq_count)
436 return;
437 } else {
438 if (fcp_cqidx > 0)
439 return;
440 }
431 441
432 if (phba->cfg_fcp_eq_count == 0) { 442 if (phba->cfg_fcp_eq_count == 0) {
433 fcp_eqidx = -1; 443 fcp_eqidx = -1;
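
The added branches make the dump helpers interrupt-mode aware: with MSI-X there is one fast-path CQ/EQ per vector, so any index below cfg_fcp_eq_count is dumpable, whereas in INTx or MSI mode only a single fast-path queue pair appears to be set up, so indexes above 0 must bail out before phba->sli4_hba.fcp_cq[] is dereferenced.
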
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 5bb269e224f6..9b4f92941dce 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -530,7 +530,7 @@ lpfc_work_list_done(struct lpfc_hba *phba)
530 break; 530 break;
531 case LPFC_EVT_OFFLINE_PREP: 531 case LPFC_EVT_OFFLINE_PREP:
532 if (phba->link_state >= LPFC_LINK_DOWN) 532 if (phba->link_state >= LPFC_LINK_DOWN)
533 lpfc_offline_prep(phba); 533 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
534 *(int *)(evtp->evt_arg1) = 0; 534 *(int *)(evtp->evt_arg1) = 0;
535 complete((struct completion *)(evtp->evt_arg2)); 535 complete((struct completion *)(evtp->evt_arg2));
536 break; 536 break;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f1946dfda5b4..953603a7a43c 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -874,6 +874,7 @@ struct mbox_header {
874#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 874#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
875#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 875#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
876#define LPFC_MBOX_OPCODE_NOP 0x21 876#define LPFC_MBOX_OPCODE_NOP 0x21
877#define LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY 0x29
877#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 878#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
878#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 879#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
879#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 880#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
@@ -940,6 +941,13 @@ struct eq_context {
940 uint32_t reserved3; 941 uint32_t reserved3;
941}; 942};
942 943
944struct eq_delay_info {
945 uint32_t eq_id;
946 uint32_t phase;
947 uint32_t delay_multi;
948};
949#define LPFC_MAX_EQ_DELAY 8
950
943struct sgl_page_pairs { 951struct sgl_page_pairs {
944 uint32_t sgl_pg0_addr_lo; 952 uint32_t sgl_pg0_addr_lo;
945 uint32_t sgl_pg0_addr_hi; 953 uint32_t sgl_pg0_addr_hi;
@@ -1002,6 +1010,19 @@ struct lpfc_mbx_eq_create {
1002 } u; 1010 } u;
1003}; 1011};
1004 1012
1013struct lpfc_mbx_modify_eq_delay {
1014 struct mbox_header header;
1015 union {
1016 struct {
1017 uint32_t num_eq;
1018 struct eq_delay_info eq[LPFC_MAX_EQ_DELAY];
1019 } request;
1020 struct {
1021 uint32_t word0;
1022 } response;
1023 } u;
1024};
1025
1005struct lpfc_mbx_eq_destroy { 1026struct lpfc_mbx_eq_destroy {
1006 struct mbox_header header; 1027 struct mbox_header header;
1007 union { 1028 union {
@@ -2875,6 +2896,7 @@ struct lpfc_mqe {
2875 struct lpfc_mbx_mq_create mq_create; 2896 struct lpfc_mbx_mq_create mq_create;
2876 struct lpfc_mbx_mq_create_ext mq_create_ext; 2897 struct lpfc_mbx_mq_create_ext mq_create_ext;
2877 struct lpfc_mbx_eq_create eq_create; 2898 struct lpfc_mbx_eq_create eq_create;
2899 struct lpfc_mbx_modify_eq_delay eq_delay;
2878 struct lpfc_mbx_cq_create cq_create; 2900 struct lpfc_mbx_cq_create cq_create;
2879 struct lpfc_mbx_wq_create wq_create; 2901 struct lpfc_mbx_wq_create wq_create;
2880 struct lpfc_mbx_rq_create rq_create; 2902 struct lpfc_mbx_rq_create rq_create;
@@ -3084,6 +3106,28 @@ struct lpfc_acqe_fc_la {
3084#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK 0x2 3106#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK 0x2
3085}; 3107};
3086 3108
3109struct lpfc_acqe_misconfigured_event {
3110 struct {
3111 uint32_t word0;
3112#define lpfc_sli_misconfigured_port0_SHIFT 0
3113#define lpfc_sli_misconfigured_port0_MASK 0x000000FF
3114#define lpfc_sli_misconfigured_port0_WORD word0
3115#define lpfc_sli_misconfigured_port1_SHIFT 8
3116#define lpfc_sli_misconfigured_port1_MASK 0x000000FF
3117#define lpfc_sli_misconfigured_port1_WORD word0
3118#define lpfc_sli_misconfigured_port2_SHIFT 16
3119#define lpfc_sli_misconfigured_port2_MASK 0x000000FF
3120#define lpfc_sli_misconfigured_port2_WORD word0
3121#define lpfc_sli_misconfigured_port3_SHIFT 24
3122#define lpfc_sli_misconfigured_port3_MASK 0x000000FF
3123#define lpfc_sli_misconfigured_port3_WORD word0
3124 } theEvent;
3125#define LPFC_SLI_EVENT_STATUS_VALID 0x00
3126#define LPFC_SLI_EVENT_STATUS_NOT_PRESENT 0x01
3127#define LPFC_SLI_EVENT_STATUS_WRONG_TYPE 0x02
3128#define LPFC_SLI_EVENT_STATUS_UNSUPPORTED 0x03
3129};
3130
3087struct lpfc_acqe_sli { 3131struct lpfc_acqe_sli {
3088 uint32_t event_data1; 3132 uint32_t event_data1;
3089 uint32_t event_data2; 3133 uint32_t event_data2;
@@ -3094,6 +3138,7 @@ struct lpfc_acqe_sli {
3094#define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3 3138#define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3
3095#define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4 3139#define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4
3096#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5 3140#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
3141#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
3097}; 3142};
3098 3143
3099/* 3144/*
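
The port0..port3 definitions follow the driver's existing bit-field convention: each field is described by a _SHIFT/_MASK/_WORD macro triplet that the bf_get() accessor (defined elsewhere in lpfc_hw4.h, roughly ((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) stitches together. The per-link decode in the new event handler therefore expands to something like:

	/* status byte for link number 1, with bf_get() expanded by hand */
	uint8_t status = (misconfigured->theEvent.word0 >>
			  lpfc_sli_misconfigured_port1_SHIFT) &
			 lpfc_sli_misconfigured_port1_MASK;
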
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 411ed48d79da..45c15208be9f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -73,6 +73,8 @@ static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
73static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 73static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
74static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 74static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
75static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 75static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
76static void lpfc_sli4_disable_intr(struct lpfc_hba *);
77static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
76 78
77static struct scsi_transport_template *lpfc_transport_template = NULL; 79static struct scsi_transport_template *lpfc_transport_template = NULL;
78static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 80static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1169,7 +1171,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
1169 spin_lock_irq(&phba->hbalock); 1171 spin_lock_irq(&phba->hbalock);
1170 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1172 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1171 spin_unlock_irq(&phba->hbalock); 1173 spin_unlock_irq(&phba->hbalock);
1172 lpfc_offline_prep(phba); 1174 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1173 1175
1174 lpfc_offline(phba); 1176 lpfc_offline(phba);
1175 lpfc_reset_barrier(phba); 1177 lpfc_reset_barrier(phba);
@@ -1193,7 +1195,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
1193static void 1195static void
1194lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1196lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1195{ 1197{
1196 lpfc_offline_prep(phba); 1198 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1197 lpfc_offline(phba); 1199 lpfc_offline(phba);
1198 lpfc_sli4_brdreset(phba); 1200 lpfc_sli4_brdreset(phba);
1199 lpfc_hba_down_post(phba); 1201 lpfc_hba_down_post(phba);
@@ -1251,7 +1253,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1251 * There was a firmware error. Take the hba offline and then 1253 * There was a firmware error. Take the hba offline and then
1252 * attempt to restart it. 1254 * attempt to restart it.
1253 */ 1255 */
1254 lpfc_offline_prep(phba); 1256 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1255 lpfc_offline(phba); 1257 lpfc_offline(phba);
1256 1258
1257 /* Wait for the ER1 bit to clear.*/ 1259 /* Wait for the ER1 bit to clear.*/
@@ -1372,7 +1374,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1372 * There was a firmware error. Take the hba offline and then 1374 * There was a firmware error. Take the hba offline and then
1373 * attempt to restart it. 1375 * attempt to restart it.
1374 */ 1376 */
1375 lpfc_offline_prep(phba); 1377 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1376 lpfc_offline(phba); 1378 lpfc_offline(phba);
1377 lpfc_sli_brdrestart(phba); 1379 lpfc_sli_brdrestart(phba);
1378 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1380 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
@@ -1428,6 +1430,54 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1428} 1430}
1429 1431
1430/** 1432/**
1433 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1434 * @phba: pointer to lpfc hba data structure.
1435 * @mbx_action: flag for mailbox shutdown action.
1436 *
1437 * This routine is invoked to perform an SLI4 port PCI function reset in
1438 * response to port status register polling attention. It waits for port
1439 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1440 * During this process, interrupt vectors are freed and later requested
1441 * for handling possible port resource change.
1442 **/
1443static int
1444lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
1445{
1446 int rc;
1447 uint32_t intr_mode;
1448
1449 /*
1450 * On an error status condition, the driver needs to wait for the
1451 * port to become ready before performing the reset.
1452 */
1453 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1454 if (!rc) {
1455 /* need reset: attempt for port recovery */
1456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1457 "2887 Reset Needed: Attempting Port "
1458 "Recovery...\n");
1459 lpfc_offline_prep(phba, mbx_action);
1460 lpfc_offline(phba);
1461 /* release interrupt for possible resource change */
1462 lpfc_sli4_disable_intr(phba);
1463 lpfc_sli_brdrestart(phba);
1464 /* request and enable interrupt */
1465 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1466 if (intr_mode == LPFC_INTR_ERROR) {
1467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1468 "3175 Failed to enable interrupt\n");
1469 return -EIO;
1470 } else {
1471 phba->intr_mode = intr_mode;
1472 }
1473 rc = lpfc_online(phba);
1474 if (rc == 0)
1475 lpfc_unblock_mgmt_io(phba);
1476 }
1477 return rc;
1478}
1479
1480/**
1431 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1481 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1432 * @phba: pointer to lpfc hba data structure. 1482 * @phba: pointer to lpfc hba data structure.
1433 * 1483 *
@@ -1506,30 +1556,18 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1506 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1556 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1507 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1508 "3145 Port Down: Provisioning\n"); 1558 "3145 Port Down: Provisioning\n");
1509 /* 1559
1510 * On error status condition, driver need to wait for port 1560 /* Check port status register for function reset */
1511 * ready before performing reset. 1561 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
1512 */ 1562 if (rc == 0) {
1513 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1563 /* don't report event on forced debug dump */
1514 if (!rc) { 1564 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1515 /* need reset: attempt for port recovery */ 1565 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1566 return;
1517 "2887 Reset Needed: Attempting Port " 1567 else
1518 "Recovery...\n"); 1568 break;
1519 lpfc_offline_prep(phba);
1520 lpfc_offline(phba);
1521 lpfc_sli_brdrestart(phba);
1522 if (lpfc_online(phba) == 0) {
1523 lpfc_unblock_mgmt_io(phba);
1524 /* don't report event on forced debug dump */
1525 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1526 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1527 return;
1528 else
1529 break;
1530 }
1531 /* fall through for not able to recover */
1532 } 1569 }
1570 /* fall through for not able to recover */
1533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1571 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1534 "3152 Unrecoverable error, bring the port " 1572 "3152 Unrecoverable error, bring the port "
1535 "offline\n"); 1573 "offline\n");
@@ -2494,15 +2532,19 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
2494 * driver prepares the HBA interface for online or offline. 2532 * driver prepares the HBA interface for online or offline.
2495 **/ 2533 **/
2496static void 2534static void
2497lpfc_block_mgmt_io(struct lpfc_hba * phba) 2535lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2498{ 2536{
2499 unsigned long iflag; 2537 unsigned long iflag;
2500 uint8_t actcmd = MBX_HEARTBEAT; 2538 uint8_t actcmd = MBX_HEARTBEAT;
2501 unsigned long timeout; 2539 unsigned long timeout;
2502 2540
2503 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2504 spin_lock_irqsave(&phba->hbalock, iflag); 2541 spin_lock_irqsave(&phba->hbalock, iflag);
2505 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2542 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2543 spin_unlock_irqrestore(&phba->hbalock, iflag);
2544 if (mbx_action == LPFC_MBX_NO_WAIT)
2545 return;
2546 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2547 spin_lock_irqsave(&phba->hbalock, iflag);
2506 if (phba->sli.mbox_active) { 2548 if (phba->sli.mbox_active) {
2507 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2549 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2508 /* Determine how long we might wait for the active mailbox 2550 /* Determine how long we might wait for the active mailbox
@@ -2592,7 +2634,7 @@ lpfc_online(struct lpfc_hba *phba)
2592 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2634 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2593 "0458 Bring Adapter online\n"); 2635 "0458 Bring Adapter online\n");
2594 2636
2595 lpfc_block_mgmt_io(phba); 2637 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
2596 2638
2597 if (!lpfc_sli_queue_setup(phba)) { 2639 if (!lpfc_sli_queue_setup(phba)) {
2598 lpfc_unblock_mgmt_io(phba); 2640 lpfc_unblock_mgmt_io(phba);
@@ -2660,7 +2702,7 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2660 * queue to make it ready to be brought offline. 2702 * queue to make it ready to be brought offline.
2661 **/ 2703 **/
2662void 2704void
2663lpfc_offline_prep(struct lpfc_hba * phba) 2705lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
2664{ 2706{
2665 struct lpfc_vport *vport = phba->pport; 2707 struct lpfc_vport *vport = phba->pport;
2666 struct lpfc_nodelist *ndlp, *next_ndlp; 2708 struct lpfc_nodelist *ndlp, *next_ndlp;
@@ -2671,7 +2713,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2671 if (vport->fc_flag & FC_OFFLINE_MODE) 2713 if (vport->fc_flag & FC_OFFLINE_MODE)
2672 return; 2714 return;
2673 2715
2674 lpfc_block_mgmt_io(phba); 2716 lpfc_block_mgmt_io(phba, mbx_action);
2675 2717
2676 lpfc_linkdown(phba); 2718 lpfc_linkdown(phba);
2677 2719
@@ -2718,7 +2760,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2718 } 2760 }
2719 lpfc_destroy_vport_work_array(phba, vports); 2761 lpfc_destroy_vport_work_array(phba, vports);
2720 2762
2721 lpfc_sli_mbox_sys_shutdown(phba); 2763 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
2722} 2764}
2723 2765
2724/** 2766/**
@@ -3684,12 +3726,76 @@ out_free_pmb:
3684static void 3726static void
3685lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 3727lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3686{ 3728{
3687 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3729 char port_name;
3688 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 3730 char message[80];
3689 "x%08x SLI Event Type:%d", 3731 uint8_t status;
3690 acqe_sli->event_data1, acqe_sli->event_data2, 3732 struct lpfc_acqe_misconfigured_event *misconfigured;
3691 bf_get(lpfc_trailer_type, acqe_sli)); 3733
3692 return; 3734 /* special case misconfigured event as it contains data for all ports */
3735 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3736 LPFC_SLI_INTF_IF_TYPE_2) ||
3737 (bf_get(lpfc_trailer_type, acqe_sli) !=
3738 LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
3739 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3740 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3741 "x%08x SLI Event Type:%d\n",
3742 acqe_sli->event_data1, acqe_sli->event_data2,
3743 bf_get(lpfc_trailer_type, acqe_sli));
3744 return;
3745 }
3746
3747 port_name = phba->Port[0];
3748 if (port_name == 0x00)
3749 port_name = '?'; /* port name is empty */
3750
3751 misconfigured = (struct lpfc_acqe_misconfigured_event *)
3752 &acqe_sli->event_data1;
3753
3754 /* fetch the status for this port */
3755 switch (phba->sli4_hba.lnk_info.lnk_no) {
3756 case LPFC_LINK_NUMBER_0:
3757 status = bf_get(lpfc_sli_misconfigured_port0,
3758 &misconfigured->theEvent);
3759 break;
3760 case LPFC_LINK_NUMBER_1:
3761 status = bf_get(lpfc_sli_misconfigured_port1,
3762 &misconfigured->theEvent);
3763 break;
3764 case LPFC_LINK_NUMBER_2:
3765 status = bf_get(lpfc_sli_misconfigured_port2,
3766 &misconfigured->theEvent);
3767 break;
3768 case LPFC_LINK_NUMBER_3:
3769 status = bf_get(lpfc_sli_misconfigured_port3,
3770 &misconfigured->theEvent);
3771 break;
3772 default:
3773 status = ~LPFC_SLI_EVENT_STATUS_VALID;
3774 break;
3775 }
3776
3777 switch (status) {
3778 case LPFC_SLI_EVENT_STATUS_VALID:
3779 return; /* no message if the sfp is okay */
3780 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
3781 sprintf(message, "Not installed");
3782 break;
3783 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
3784 sprintf(message,
3785 "Optics of two types installed");
3786 break;
3787 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
3788 sprintf(message, "Incompatible optics");
3789 break;
3790 default:
3791 /* firmware is reporting a status we don't know about */
3792 sprintf(message, "Unknown event status x%02x", status);
3793 break;
3794 }
3795
3796 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3797 "3176 Misconfigured Physical Port - "
3798 "Port Name %c %s\n", port_name, message);
3693} 3799}
3694 3800
3695/** 3801/**
@@ -4312,7 +4418,7 @@ lpfc_reset_hba(struct lpfc_hba *phba)
4312 phba->link_state = LPFC_HBA_ERROR; 4418 phba->link_state = LPFC_HBA_ERROR;
4313 return; 4419 return;
4314 } 4420 }
4315 lpfc_offline_prep(phba); 4421 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4316 lpfc_offline(phba); 4422 lpfc_offline(phba);
4317 lpfc_sli_brdrestart(phba); 4423 lpfc_sli_brdrestart(phba);
4318 lpfc_online(phba); 4424 lpfc_online(phba);
@@ -5514,14 +5620,45 @@ lpfc_destroy_shost(struct lpfc_hba *phba)
5514static void 5620static void
5515lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 5621lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5516{ 5622{
5623 uint32_t old_mask;
5624 uint32_t old_guard;
5625
5517 int pagecnt = 10; 5626 int pagecnt = 10;
5518 if (lpfc_prot_mask && lpfc_prot_guard) { 5627 if (lpfc_prot_mask && lpfc_prot_guard) {
5519 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5628 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5520 "1478 Registering BlockGuard with the " 5629 "1478 Registering BlockGuard with the "
5521 "SCSI layer\n"); 5630 "SCSI layer\n");
5522 scsi_host_set_prot(shost, lpfc_prot_mask); 5631
5523 scsi_host_set_guard(shost, lpfc_prot_guard); 5632 old_mask = lpfc_prot_mask;
5633 old_guard = lpfc_prot_guard;
5634
5635 /* Only allow supported values */
5636 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
5637 SHOST_DIX_TYPE0_PROTECTION |
5638 SHOST_DIX_TYPE1_PROTECTION);
5639 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
5640
5641 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
5642 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
5643 lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
5644
5645 if (lpfc_prot_mask && lpfc_prot_guard) {
5646 if ((old_mask != lpfc_prot_mask) ||
5647 (old_guard != lpfc_prot_guard))
5648 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5649 "1475 Registering BlockGuard with the "
5650 "SCSI layer: mask %d guard %d\n",
5651 lpfc_prot_mask, lpfc_prot_guard);
5652
5653 scsi_host_set_prot(shost, lpfc_prot_mask);
5654 scsi_host_set_guard(shost, lpfc_prot_guard);
5655 } else
5656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5657 "1479 Not Registering BlockGuard with the SCSI "
5658 "layer, Bad protection parameters: %d %d\n",
5659 old_mask, old_guard);
5524 } 5660 }
5661
5525 if (!_dump_buf_data) { 5662 if (!_dump_buf_data) {
5526 while (pagecnt) { 5663 while (pagecnt) {
5527 spin_lock_init(&_dump_buf_lock); 5664 spin_lock_init(&_dump_buf_lock);
@@ -8859,7 +8996,7 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8859 "0473 PCI device Power Management suspend.\n"); 8996 "0473 PCI device Power Management suspend.\n");
8860 8997
8861 /* Bring down the device */ 8998 /* Bring down the device */
8862 lpfc_offline_prep(phba); 8999 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
8863 lpfc_offline(phba); 9000 lpfc_offline(phba);
8864 kthread_stop(phba->worker_thread); 9001 kthread_stop(phba->worker_thread);
8865 9002
@@ -8985,7 +9122,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
8985 "2710 PCI channel disable preparing for reset\n"); 9122 "2710 PCI channel disable preparing for reset\n");
8986 9123
8987 /* Block any management I/Os to the device */ 9124 /* Block any management I/Os to the device */
8988 lpfc_block_mgmt_io(phba); 9125 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
8989 9126
8990 /* Block all SCSI devices' I/Os on the host */ 9127 /* Block all SCSI devices' I/Os on the host */
8991 lpfc_scsi_dev_block(phba); 9128 lpfc_scsi_dev_block(phba);
@@ -9129,7 +9266,7 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
9129 phba->intr_mode = intr_mode; 9266 phba->intr_mode = intr_mode;
9130 9267
9131 /* Take device offline, it will perform cleanup */ 9268 /* Take device offline, it will perform cleanup */
9132 lpfc_offline_prep(phba); 9269 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9133 lpfc_offline(phba); 9270 lpfc_offline(phba);
9134 lpfc_sli_brdrestart(phba); 9271 lpfc_sli_brdrestart(phba);
9135 9272
@@ -9603,7 +9740,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9603 "2843 PCI device Power Management suspend.\n"); 9740 "2843 PCI device Power Management suspend.\n");
9604 9741
9605 /* Bring down the device */ 9742 /* Bring down the device */
9606 lpfc_offline_prep(phba); 9743 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9607 lpfc_offline(phba); 9744 lpfc_offline(phba);
9608 kthread_stop(phba->worker_thread); 9745 kthread_stop(phba->worker_thread);
9609 9746
@@ -9729,7 +9866,7 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9729 "2826 PCI channel disable preparing for reset\n"); 9866 "2826 PCI channel disable preparing for reset\n");
9730 9867
9731 /* Block any management I/Os to the device */ 9868 /* Block any management I/Os to the device */
9732 lpfc_block_mgmt_io(phba); 9869 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
9733 9870
9734 /* Block all SCSI devices' I/Os on the host */ 9871 /* Block all SCSI devices' I/Os on the host */
9735 lpfc_scsi_dev_block(phba); 9872 lpfc_scsi_dev_block(phba);
@@ -9902,7 +10039,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
9902 */ 10039 */
9903 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 10040 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
9904 /* Perform device reset */ 10041 /* Perform device reset */
9905 lpfc_offline_prep(phba); 10042 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9906 lpfc_offline(phba); 10043 lpfc_offline(phba);
9907 lpfc_sli_brdrestart(phba); 10044 lpfc_sli_brdrestart(phba);
9908 /* Bring the device back online */ 10045 /* Bring the device back online */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 66e09069f281..925975d2d765 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4275,10 +4275,8 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
4275 * Catch race where our node has transitioned, but the 4275 * Catch race where our node has transitioned, but the
4276 * transport is still transitioning. 4276 * transport is still transitioning.
4277 */ 4277 */
4278 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 4278 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4279 cmnd->result = ScsiResult(DID_IMM_RETRY, 0); 4279 goto out_tgt_busy;
4280 goto out_fail_command;
4281 }
4282 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) 4280 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
4283 goto out_tgt_busy; 4281 goto out_tgt_busy;
4284 4282
@@ -4412,12 +4410,12 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4412 struct lpfc_iocbq *abtsiocb; 4410 struct lpfc_iocbq *abtsiocb;
4413 struct lpfc_scsi_buf *lpfc_cmd; 4411 struct lpfc_scsi_buf *lpfc_cmd;
4414 IOCB_t *cmd, *icmd; 4412 IOCB_t *cmd, *icmd;
4415 int ret = SUCCESS; 4413 int ret = SUCCESS, status = 0;
4416 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 4414 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4417 4415
4418 ret = fc_block_scsi_eh(cmnd); 4416 status = fc_block_scsi_eh(cmnd);
4419 if (ret) 4417 if (status)
4420 return ret; 4418 return status;
4421 4419
4422 spin_lock_irq(&phba->hbalock); 4420 spin_lock_irq(&phba->hbalock);
4423 /* driver queued commands are in process of being flushed */ 4421 /* driver queued commands are in process of being flushed */
@@ -4435,7 +4433,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4435 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4433 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4436 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 4434 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4437 "x%x ID %d LUN %d\n", 4435 "x%x ID %d LUN %d\n",
4438 ret, cmnd->device->id, cmnd->device->lun); 4436 SUCCESS, cmnd->device->id, cmnd->device->lun);
4439 return SUCCESS; 4437 return SUCCESS;
4440 } 4438 }
4441 4439
@@ -4762,7 +4760,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
4762 unsigned tgt_id = cmnd->device->id; 4760 unsigned tgt_id = cmnd->device->id;
4763 unsigned int lun_id = cmnd->device->lun; 4761 unsigned int lun_id = cmnd->device->lun;
4764 struct lpfc_scsi_event_header scsi_event; 4762 struct lpfc_scsi_event_header scsi_event;
4765 int status; 4763 int status, ret = SUCCESS;
4766 4764
4767 if (!rdata) { 4765 if (!rdata) {
4768 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 4766 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -4803,9 +4801,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
4803 * So, continue on. 4801 * So, continue on.
4804 * We will report success if all the i/o aborts successfully. 4802 * We will report success if all the i/o aborts successfully.
4805 */ 4803 */
4806 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 4804 ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4807 LPFC_CTX_LUN); 4805 LPFC_CTX_LUN);
4808 return status; 4806 return ret;
4809} 4807}
4810 4808
4811/** 4809/**
@@ -4829,7 +4827,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
4829 unsigned tgt_id = cmnd->device->id; 4827 unsigned tgt_id = cmnd->device->id;
4830 unsigned int lun_id = cmnd->device->lun; 4828 unsigned int lun_id = cmnd->device->lun;
4831 struct lpfc_scsi_event_header scsi_event; 4829 struct lpfc_scsi_event_header scsi_event;
4832 int status; 4830 int status, ret = SUCCESS;
4833 4831
4834 if (!rdata) { 4832 if (!rdata) {
4835 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 4833 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -4870,9 +4868,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
4870 * So, continue on. 4868 * So, continue on.
4871 * We will report success if all the i/o aborts successfully. 4869 * We will report success if all the i/o aborts successfully.
4872 */ 4870 */
4873 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 4871 ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4874 LPFC_CTX_TGT); 4872 LPFC_CTX_TGT);
4875 return status; 4873 return ret;
4876} 4874}
4877 4875
4878/** 4876/**
@@ -4982,7 +4980,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
4982 struct lpfc_hba *phba = vport->phba; 4980 struct lpfc_hba *phba = vport->phba;
4983 int rc, ret = SUCCESS; 4981 int rc, ret = SUCCESS;
4984 4982
4985 lpfc_offline_prep(phba); 4983 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4986 lpfc_offline(phba); 4984 lpfc_offline(phba);
4987 rc = lpfc_sli_brdrestart(phba); 4985 rc = lpfc_sli_brdrestart(phba);
4988 if (rc) 4986 if (rc)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index b4720a109817..9cbd20b1328b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8984,7 +8984,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
8984 int i; 8984 int i;
8985 8985
8986 /* Shutdown the mailbox command sub-system */ 8986 /* Shutdown the mailbox command sub-system */
8987 lpfc_sli_mbox_sys_shutdown(phba); 8987 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
8988 8988
8989 lpfc_hba_down_prep(phba); 8989 lpfc_hba_down_prep(phba);
8990 8990
@@ -9996,11 +9996,17 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
9996 * sub-system flush routine to gracefully bring down mailbox sub-system. 9996 * sub-system flush routine to gracefully bring down mailbox sub-system.
9997 **/ 9997 **/
9998void 9998void
9999lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) 9999lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
10000{ 10000{
10001 struct lpfc_sli *psli = &phba->sli; 10001 struct lpfc_sli *psli = &phba->sli;
10002 unsigned long timeout; 10002 unsigned long timeout;
10003 10003
10004 if (mbx_action == LPFC_MBX_NO_WAIT) {
10005 /* delay 100ms for port state */
10006 msleep(100);
10007 lpfc_sli_mbox_sys_flush(phba);
10008 return;
10009 }
10004 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 10010 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
10005 10011
10006 spin_lock_irq(&phba->hbalock); 10012 spin_lock_irq(&phba->hbalock);
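[Editor's sketch] The hunk above adds an mbx_action argument so callers choose between a graceful and an immediate mailbox shutdown. A hedged sketch of that two-mode teardown pattern; struct example_hba, example_mbox_busy(), example_mbox_flush() and EXAMPLE_MBOX_TMO_MS are illustrative stand-ins, not driver symbols:

#include <linux/delay.h>
#include <linux/jiffies.h>

enum { EXAMPLE_MBX_WAIT, EXAMPLE_MBX_NO_WAIT };

static void example_mbox_sys_shutdown(struct example_hba *hba, int action)
{
	unsigned long deadline;

	if (action == EXAMPLE_MBX_NO_WAIT) {
		msleep(100);			/* brief settle delay */
		example_mbox_flush(hba);	/* fail queued commands now */
		return;
	}

	/* Graceful path: give the active command a bounded chance to finish. */
	deadline = jiffies + msecs_to_jiffies(EXAMPLE_MBOX_TMO_MS);
	while (example_mbox_busy(hba) && time_before(jiffies, deadline))
		msleep(2);
	example_mbox_flush(hba);
}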
@@ -12042,6 +12048,83 @@ out_fail:
12042} 12048}
12043 12049
12044/** 12050/**
12051 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
12052 * @phba: HBA structure that indicates the port whose EQs are modified.
12053 * @startq: The starting FCP EQ to modify
12054 *
12055 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
12056 *
12057 * The @phba struct is used to send the mailbox command to the HBA. The
12058 * @startq argument selects the first FCP EQ to change.
12059 * This function issues the mailbox command synchronously (MBX_POLL)
12060 * and waits for it to finish before continuing.
12061 *
12062 * On success this function returns zero. If it cannot allocate enough
12063 * memory it returns -ENOMEM; if the MODIFY_EQ_DELAY mailbox command
12064 * fails it returns -ENXIO.
12065 **/
12066uint32_t
12067lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12068{
12069 struct lpfc_mbx_modify_eq_delay *eq_delay;
12070 LPFC_MBOXQ_t *mbox;
12071 struct lpfc_queue *eq;
12072 int cnt, rc, length, status = 0;
12073 uint32_t shdr_status, shdr_add_status;
12074 int fcp_eqidx;
12075 union lpfc_sli4_cfg_shdr *shdr;
12076 uint16_t dmult;
12077
12078 if (startq >= phba->cfg_fcp_eq_count)
12079 return 0;
12080
12081 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12082 if (!mbox)
12083 return -ENOMEM;
12084 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
12085 sizeof(struct lpfc_sli4_cfg_mhdr));
12086 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12087 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
12088 length, LPFC_SLI4_MBX_EMBED);
12089 eq_delay = &mbox->u.mqe.un.eq_delay;
12090
12091 /* Calculate delay multiplier from maximum interrupts per second */
12092 dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1;
12093
12094 cnt = 0;
12095 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count;
12096 fcp_eqidx++) {
12097 eq = phba->sli4_hba.fp_eq[fcp_eqidx];
12098 if (!eq)
12099 continue;
12100 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
12101 eq_delay->u.request.eq[cnt].phase = 0;
12102 eq_delay->u.request.eq[cnt].delay_multi = dmult;
12103 cnt++;
12104 if (cnt >= LPFC_MAX_EQ_DELAY)
12105 break;
12106 }
12107 eq_delay->u.request.num_eq = cnt;
12108
12109 mbox->vport = phba->pport;
12110 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12111 mbox->context1 = NULL;
12112 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12113 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
12114 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12115 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12116 if (shdr_status || shdr_add_status || rc) {
12117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12118 "2512 MODIFY_EQ_DELAY mailbox failed with "
12119 "status x%x add_status x%x, mbx status x%x\n",
12120 shdr_status, shdr_add_status, rc);
12121 status = -ENXIO;
12122 }
12123 mempool_free(mbox, phba->mbox_mem_pool);
12124 return status;
12125}
12126
12127/**
12045 * lpfc_eq_create - Create an Event Queue on the HBA 12128 * lpfc_eq_create - Create an Event Queue on the HBA
12046 * @phba: HBA structure that indicates port to create a queue on. 12129 * @phba: HBA structure that indicates port to create a queue on.
12047 * @eq: The queue structure to use to create the event queue. 12130 * @eq: The queue structure to use to create the event queue.
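[Editor's note] On the dmult computation in the hunk above: the driver converts the configured interrupts-per-second cap (cfg_fcp_imax) into hardware delay units. Assuming LPFC_DMULT_CONST is the number of EQ delay units per second (its value is defined in lpfc_hw4.h), a cap of 10000 int/s yields LPFC_DMULT_CONST/10000 - 1 delay units between interrupts. A one-line sketch:

#include <linux/types.h>

/* dmult = units-per-second / max-interrupts-per-second, minus one.
 * The caller must ensure imax is non-zero.
 */
static inline u16 example_eq_delay_multiplier(u32 dmult_const, u32 imax)
{
	return dmult_const / imax - 1;
}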
@@ -12228,8 +12311,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
12228 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12311 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12229 "0361 Unsupported CQ count. (%d)\n", 12312 "0361 Unsupported CQ count. (%d)\n",
12230 cq->entry_count); 12313 cq->entry_count);
12231 if (cq->entry_count < 256) 12314 if (cq->entry_count < 256) {
12232 return -EINVAL; 12315 status = -EINVAL;
12316 goto out;
12317 }
12233 /* otherwise default to smallest count (drop through) */ 12318 /* otherwise default to smallest count (drop through) */
12234 case 256: 12319 case 256:
12235 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12320 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
@@ -12420,8 +12505,10 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
12420 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12505 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12421 "0362 Unsupported MQ count. (%d)\n", 12506 "0362 Unsupported MQ count. (%d)\n",
12422 mq->entry_count); 12507 mq->entry_count);
12423 if (mq->entry_count < 16) 12508 if (mq->entry_count < 16) {
12424 return -EINVAL; 12509 status = -EINVAL;
12510 goto out;
12511 }
12425 /* otherwise default to smallest count (drop through) */ 12512 /* otherwise default to smallest count (drop through) */
12426 case 16: 12513 case 16:
12427 bf_set(lpfc_mq_context_ring_size, 12514 bf_set(lpfc_mq_context_ring_size,
@@ -12710,8 +12797,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12710 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12797 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12711 "2535 Unsupported RQ count. (%d)\n", 12798 "2535 Unsupported RQ count. (%d)\n",
12712 hrq->entry_count); 12799 hrq->entry_count);
12713 if (hrq->entry_count < 512) 12800 if (hrq->entry_count < 512) {
12714 return -EINVAL; 12801 status = -EINVAL;
12802 goto out;
12803 }
12715 /* otherwise default to smallest count (drop through) */ 12804 /* otherwise default to smallest count (drop through) */
12716 case 512: 12805 case 512:
12717 bf_set(lpfc_rq_context_rqe_count, 12806 bf_set(lpfc_rq_context_rqe_count,
@@ -12791,8 +12880,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12791 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12880 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12792 "2536 Unsupported RQ count. (%d)\n", 12881 "2536 Unsupported RQ count. (%d)\n",
12793 drq->entry_count); 12882 drq->entry_count);
12794 if (drq->entry_count < 512) 12883 if (drq->entry_count < 512) {
12795 return -EINVAL; 12884 status = -EINVAL;
12885 goto out;
12886 }
12796 /* otherwise default to smallest count (drop through) */ 12887 /* otherwise default to smallest count (drop through) */
12797 case 512: 12888 case 512:
12798 bf_set(lpfc_rq_context_rqe_count, 12889 bf_set(lpfc_rq_context_rqe_count,
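[Editor's sketch] The CQ/MQ/RQ hunks above share one fix: in the entry-count switch, an unsupported value below the minimum now branches to the function's common exit (which frees the mailbox) instead of returning directly and leaking it. A schematic sketch of the validate-then-clamp pattern, with illustrative names:

#include <linux/printk.h>

static int example_validate_entry_count(unsigned int count)
{
	int status = 0;

	switch (count) {
	default:
		pr_err("unsupported entry count %u\n", count);
		if (count < 256) {
			status = -EINVAL;
			goto out;	/* common exit; the real code frees the mailbox there */
		}
		/* otherwise fall through and clamp to the smallest size */
	case 256:
	case 512:
	case 1024:
		break;
	}
out:
	return status;
}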
@@ -15855,24 +15946,18 @@ lpfc_drain_txq(struct lpfc_hba *phba)
15855 spin_lock_irqsave(&phba->hbalock, iflags); 15946 spin_lock_irqsave(&phba->hbalock, iflags);
15856 15947
15857 piocbq = lpfc_sli_ringtx_get(phba, pring); 15948 piocbq = lpfc_sli_ringtx_get(phba, pring);
15949 if (!piocbq) {
15950 spin_unlock_irqrestore(&phba->hbalock, iflags);
15951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15952 "2823 txq empty and txq_cnt is %d\n ",
15953 pring->txq_cnt);
15954 break;
15955 }
15858 sglq = __lpfc_sli_get_sglq(phba, piocbq); 15956 sglq = __lpfc_sli_get_sglq(phba, piocbq);
15859 if (!sglq) { 15957 if (!sglq) {
15860 __lpfc_sli_ringtx_put(phba, pring, piocbq); 15958 __lpfc_sli_ringtx_put(phba, pring, piocbq);
15861 spin_unlock_irqrestore(&phba->hbalock, iflags); 15959 spin_unlock_irqrestore(&phba->hbalock, iflags);
15862 break; 15960 break;
15863 } else {
15864 if (!piocbq) {
15865 /* The txq_cnt out of sync. This should
15866 * never happen
15867 */
15868 sglq = __lpfc_clear_active_sglq(phba,
15869 sglq->sli4_lxritag);
15870 spin_unlock_irqrestore(&phba->hbalock, iflags);
15871 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15872 "2823 txq empty and txq_cnt is %d\n ",
15873 pring->txq_cnt);
15874 break;
15875 }
15876 } 15961 }
15877 15962
15878 /* The xri and iocb resources secured, 15963 /* The xri and iocb resources secured,
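[Editor's sketch] The drain-loop hunk above moves the NULL check on the dequeued iocb to immediately after the dequeue, removing the old dead branch that tested !piocbq only after piocbq had already been used for the sglq lookup. A sketch of the corrected loop shape; all example_* names are illustrative:

#include <linux/spinlock.h>

static void example_drain_txq(struct example_hba *hba,
			      struct example_ring *pring)
{
	struct example_iocb *piocbq;
	struct example_sglq *sglq;
	unsigned long flags;

	while (example_txq_count(pring)) {
		spin_lock_irqsave(&hba->lock, flags);

		piocbq = example_ringtx_get(hba, pring);
		if (!piocbq) {		/* txq_cnt out of sync: stop */
			spin_unlock_irqrestore(&hba->lock, flags);
			break;
		}

		sglq = example_get_sglq(hba, piocbq);
		if (!sglq) {		/* no XRI free: requeue and stop */
			example_ringtx_put(hba, pring, piocbq);
			spin_unlock_irqrestore(&hba->lock, flags);
			break;
		}

		example_issue_iocb(hba, piocbq, sglq);
		spin_unlock_irqrestore(&hba->lock, flags);
	}
}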
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a4a77080091b..ec756118c5c1 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -598,6 +598,7 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
598 uint32_t); 598 uint32_t);
599void lpfc_sli4_queue_free(struct lpfc_queue *); 599void lpfc_sli4_queue_free(struct lpfc_queue *);
600uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); 600uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
601uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
601uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, 602uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
602 struct lpfc_queue *, uint32_t, uint32_t); 603 struct lpfc_queue *, uint32_t, uint32_t);
603int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, 604int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 59c57a409981..4704e5b5088e 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.31" 21#define LPFC_DRIVER_VERSION "8.3.32"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 4d39a9ffc081..97825f116954 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -524,7 +524,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
524 mega_passthru *pthru; 524 mega_passthru *pthru;
525 scb_t *scb; 525 scb_t *scb;
526 mbox_t *mbox; 526 mbox_t *mbox;
527 long seg; 527 u32 seg;
528 char islogical; 528 char islogical;
529 int max_ldrv_num; 529 int max_ldrv_num;
530 int channel = 0; 530 int channel = 0;
@@ -858,7 +858,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
858 858
859 /* Calculate Scatter-Gather info */ 859 /* Calculate Scatter-Gather info */
860 mbox->m_out.numsgelements = mega_build_sglist(adapter, scb, 860 mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
861 (u32 *)&mbox->m_out.xferaddr, (u32 *)&seg); 861 (u32 *)&mbox->m_out.xferaddr, &seg);
862 862
863 return scb; 863 return scb;
864 864
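[Editor's note] The megaraid change above is a type fix: seg was a long but was passed to mega_build_sglist() through a (u32 *) cast, which on 64-bit targets writes only part of the variable (and the wrong part on big-endian machines). Declaring seg as u32 removes the cast. A tiny sketch of the safe shape:

#include <linux/types.h>

static void example_fill_count(u32 *out)
{
	*out = 42;
}

static u32 example_caller(void)
{
	u32 seg;	/* matches the callee's parameter type exactly */

	example_fill_count(&seg);	/* no (u32 *)&some_long cast needed */
	return seg;
}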
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 35bd13879fed..54b1c5bb310f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -2731,7 +2731,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2731 } 2731 }
2732 2732
2733 out: 2733 out:
2734 spin_unlock_irq(&adapter->lock); 2734 spin_unlock(&adapter->lock);
2735 return rval; 2735 return rval;
2736} 2736}
2737 2737
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index b6dd3a5de7f9..b3a1a30055d6 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -1158,6 +1158,7 @@ extern struct scsi_transport_template *mpt2sas_transport_template;
1158extern int scsi_internal_device_block(struct scsi_device *sdev); 1158extern int scsi_internal_device_block(struct scsi_device *sdev);
1159extern u8 mpt2sas_stm_zero_smid_handler(struct MPT2SAS_ADAPTER *ioc, 1159extern u8 mpt2sas_stm_zero_smid_handler(struct MPT2SAS_ADAPTER *ioc,
1160 u8 msix_index, u32 reply); 1160 u8 msix_index, u32 reply);
1161extern int scsi_internal_device_unblock(struct scsi_device *sdev); 1161extern int scsi_internal_device_unblock(struct scsi_device *sdev,
1162 enum scsi_device_state new_state);
1162 1163
1163#endif /* MPT2SAS_BASE_H_INCLUDED */ 1164#endif /* MPT2SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 76973e8ca4ba..b1ebd6f8dab3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2904,7 +2904,7 @@ _scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc)
2904 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, " 2904 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, "
2905 "handle(0x%04x)\n", 2905 "handle(0x%04x)\n",
2906 sas_device_priv_data->sas_target->handle)); 2906 sas_device_priv_data->sas_target->handle));
2907 scsi_internal_device_unblock(sdev); 2907 scsi_internal_device_unblock(sdev, SDEV_RUNNING);
2908 } 2908 }
2909} 2909}
2910/** 2910/**
@@ -2933,7 +2933,7 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
2933 "sas address(0x%016llx)\n", ioc->name, 2933 "sas address(0x%016llx)\n", ioc->name,
2934 (unsigned long long)sas_address)); 2934 (unsigned long long)sas_address));
2935 sas_device_priv_data->block = 0; 2935 sas_device_priv_data->block = 0;
2936 scsi_internal_device_unblock(sdev); 2936 scsi_internal_device_unblock(sdev, SDEV_RUNNING);
2937 } 2937 }
2938 } 2938 }
2939} 2939}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index fd3b2839843b..4539d59a0857 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -885,7 +885,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
885 struct completion *completion, int is_tmf, 885 struct completion *completion, int is_tmf,
886 struct mvs_tmf_task *tmf) 886 struct mvs_tmf_task *tmf)
887{ 887{
888 struct domain_device *dev = task->dev;
889 struct mvs_info *mvi = NULL; 888 struct mvs_info *mvi = NULL;
890 u32 rc = 0; 889 u32 rc = 0;
891 u32 pass = 0; 890 u32 pass = 0;
@@ -1365,9 +1364,9 @@ void mvs_dev_gone(struct domain_device *dev)
1365 1364
1366static void mvs_task_done(struct sas_task *task) 1365static void mvs_task_done(struct sas_task *task)
1367{ 1366{
1368 if (!del_timer(&task->timer)) 1367 if (!del_timer(&task->slow_task->timer))
1369 return; 1368 return;
1370 complete(&task->completion); 1369 complete(&task->slow_task->completion);
1371} 1370}
1372 1371
1373static void mvs_tmf_timedout(unsigned long data) 1372static void mvs_tmf_timedout(unsigned long data)
@@ -1375,7 +1374,7 @@ static void mvs_tmf_timedout(unsigned long data)
1375 struct sas_task *task = (struct sas_task *)data; 1374 struct sas_task *task = (struct sas_task *)data;
1376 1375
1377 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1376 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1378 complete(&task->completion); 1377 complete(&task->slow_task->completion);
1379} 1378}
1380 1379
1381#define MVS_TASK_TIMEOUT 20 1380#define MVS_TASK_TIMEOUT 20
@@ -1386,7 +1385,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1386 struct sas_task *task = NULL; 1385 struct sas_task *task = NULL;
1387 1386
1388 for (retry = 0; retry < 3; retry++) { 1387 for (retry = 0; retry < 3; retry++) {
1389 task = sas_alloc_task(GFP_KERNEL); 1388 task = sas_alloc_slow_task(GFP_KERNEL);
1390 if (!task) 1389 if (!task)
1391 return -ENOMEM; 1390 return -ENOMEM;
1392 1391
@@ -1396,20 +1395,20 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1396 memcpy(&task->ssp_task, parameter, para_len); 1395 memcpy(&task->ssp_task, parameter, para_len);
1397 task->task_done = mvs_task_done; 1396 task->task_done = mvs_task_done;
1398 1397
1399 task->timer.data = (unsigned long) task; 1398 task->slow_task->timer.data = (unsigned long) task;
1400 task->timer.function = mvs_tmf_timedout; 1399 task->slow_task->timer.function = mvs_tmf_timedout;
1401 task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; 1400 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1402 add_timer(&task->timer); 1401 add_timer(&task->slow_task->timer);
1403 1402
1404 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); 1403 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
1405 1404
1406 if (res) { 1405 if (res) {
1407 del_timer(&task->timer); 1406 del_timer(&task->slow_task->timer);
1408 mv_printk("executing internal task failed:%d\n", res); 1407 mv_printk("executing internal task failed:%d\n", res);
1409 goto ex_err; 1408 goto ex_err;
1410 } 1409 }
1411 1410
1412 wait_for_completion(&task->completion); 1411 wait_for_completion(&task->slow_task->completion);
1413 res = TMF_RESP_FUNC_FAILED; 1412 res = TMF_RESP_FUNC_FAILED;
1414 /* Even TMF timed out, return direct. */ 1413 /* Even TMF timed out, return direct. */
1415 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1414 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 3b11edd4a50c..b961112395d5 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -650,9 +650,9 @@ int pm8001_dev_found(struct domain_device *dev)
650 650
651static void pm8001_task_done(struct sas_task *task) 651static void pm8001_task_done(struct sas_task *task)
652{ 652{
653 if (!del_timer(&task->timer)) 653 if (!del_timer(&task->slow_task->timer))
654 return; 654 return;
655 complete(&task->completion); 655 complete(&task->slow_task->completion);
656} 656}
657 657
658static void pm8001_tmf_timedout(unsigned long data) 658static void pm8001_tmf_timedout(unsigned long data)
@@ -660,7 +660,7 @@ static void pm8001_tmf_timedout(unsigned long data)
660 struct sas_task *task = (struct sas_task *)data; 660 struct sas_task *task = (struct sas_task *)data;
661 661
662 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 662 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
663 complete(&task->completion); 663 complete(&task->slow_task->completion);
664} 664}
665 665
666#define PM8001_TASK_TIMEOUT 20 666#define PM8001_TASK_TIMEOUT 20
@@ -683,7 +683,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
683 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); 683 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
684 684
685 for (retry = 0; retry < 3; retry++) { 685 for (retry = 0; retry < 3; retry++) {
686 task = sas_alloc_task(GFP_KERNEL); 686 task = sas_alloc_slow_task(GFP_KERNEL);
687 if (!task) 687 if (!task)
688 return -ENOMEM; 688 return -ENOMEM;
689 689
@@ -691,21 +691,21 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
691 task->task_proto = dev->tproto; 691 task->task_proto = dev->tproto;
692 memcpy(&task->ssp_task, parameter, para_len); 692 memcpy(&task->ssp_task, parameter, para_len);
693 task->task_done = pm8001_task_done; 693 task->task_done = pm8001_task_done;
694 task->timer.data = (unsigned long)task; 694 task->slow_task->timer.data = (unsigned long)task;
695 task->timer.function = pm8001_tmf_timedout; 695 task->slow_task->timer.function = pm8001_tmf_timedout;
696 task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; 696 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
697 add_timer(&task->timer); 697 add_timer(&task->slow_task->timer);
698 698
699 res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf); 699 res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf);
700 700
701 if (res) { 701 if (res) {
702 del_timer(&task->timer); 702 del_timer(&task->slow_task->timer);
703 PM8001_FAIL_DBG(pm8001_ha, 703 PM8001_FAIL_DBG(pm8001_ha,
704 pm8001_printk("Executing internal task " 704 pm8001_printk("Executing internal task "
705 "failed\n")); 705 "failed\n"));
706 goto ex_err; 706 goto ex_err;
707 } 707 }
708 wait_for_completion(&task->completion); 708 wait_for_completion(&task->slow_task->completion);
709 res = -TMF_RESP_FUNC_FAILED; 709 res = -TMF_RESP_FUNC_FAILED;
710 /* Even TMF timed out, return direct. */ 710 /* Even TMF timed out, return direct. */
711 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 711 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
@@ -765,17 +765,17 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
765 struct sas_task *task = NULL; 765 struct sas_task *task = NULL;
766 766
767 for (retry = 0; retry < 3; retry++) { 767 for (retry = 0; retry < 3; retry++) {
768 task = sas_alloc_task(GFP_KERNEL); 768 task = sas_alloc_slow_task(GFP_KERNEL);
769 if (!task) 769 if (!task)
770 return -ENOMEM; 770 return -ENOMEM;
771 771
772 task->dev = dev; 772 task->dev = dev;
773 task->task_proto = dev->tproto; 773 task->task_proto = dev->tproto;
774 task->task_done = pm8001_task_done; 774 task->task_done = pm8001_task_done;
775 task->timer.data = (unsigned long)task; 775 task->slow_task->timer.data = (unsigned long)task;
776 task->timer.function = pm8001_tmf_timedout; 776 task->slow_task->timer.function = pm8001_tmf_timedout;
777 task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ; 777 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
778 add_timer(&task->timer); 778 add_timer(&task->slow_task->timer);
779 779
780 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); 780 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
781 if (res) 781 if (res)
@@ -789,13 +789,13 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
789 pm8001_dev, flag, task_tag, ccb_tag); 789 pm8001_dev, flag, task_tag, ccb_tag);
790 790
791 if (res) { 791 if (res) {
792 del_timer(&task->timer); 792 del_timer(&task->slow_task->timer);
793 PM8001_FAIL_DBG(pm8001_ha, 793 PM8001_FAIL_DBG(pm8001_ha,
794 pm8001_printk("Executing internal task " 794 pm8001_printk("Executing internal task "
795 "failed\n")); 795 "failed\n"));
796 goto ex_err; 796 goto ex_err;
797 } 797 }
798 wait_for_completion(&task->completion); 798 wait_for_completion(&task->slow_task->completion);
799 res = TMF_RESP_FUNC_FAILED; 799 res = TMF_RESP_FUNC_FAILED;
800 /* Even TMF timed out, return direct. */ 800 /* Even TMF timed out, return direct. */
801 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 801 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
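[Editor's sketch] The mvsas and pm8001 hunks above are the same libsas conversion: internal TMF and abort tasks are now allocated with sas_alloc_slow_task(), which supplies the timer and completion that used to be embedded in every sas_task. A hedged sketch of the resulting execute-with-timeout pattern; the example_* callbacks and the 20-second timeout are illustrative:

#include <linux/timer.h>
#include <scsi/libsas.h>

static void example_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;		/* timeout handler already fired */
	complete(&task->slow_task->completion);
}

static void example_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}

static int example_exec_internal_task(struct domain_device *dev)
{
	struct sas_task *task = sas_alloc_slow_task(GFP_KERNEL);

	if (!task)
		return -ENOMEM;

	task->dev = dev;
	task->task_proto = dev->tproto;
	task->task_done = example_task_done;

	task->slow_task->timer.data = (unsigned long)task;
	task->slow_task->timer.function = example_tmf_timedout;
	task->slow_task->timer.expires = jiffies + 20 * HZ;
	add_timer(&task->slow_task->timer);

	/* Hand the task to the LLD here (driver-specific). */

	wait_for_completion(&task->slow_task->completion);
	sas_free_task(task);
	return 0;
}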
@@ -962,8 +962,9 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
962 struct pm8001_device *pm8001_dev; 962 struct pm8001_device *pm8001_dev;
963 struct pm8001_hba_info *pm8001_ha; 963 struct pm8001_hba_info *pm8001_ha;
964 struct sas_phy *phy; 964 struct sas_phy *phy;
965
965 if (!dev || !dev->lldd_dev) 966 if (!dev || !dev->lldd_dev)
966 return -1; 967 return -ENODEV;
967 968
968 pm8001_dev = dev->lldd_dev; 969 pm8001_dev = dev->lldd_dev;
969 pm8001_ha = pm8001_find_ha_by_dev(dev); 970 pm8001_ha = pm8001_find_ha_by_dev(dev);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ca5084743135..a44653b42161 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -685,7 +685,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
685 pcix_set_mmrbc(ha->pdev, 2048); 685 pcix_set_mmrbc(ha->pdev, 2048);
686 686
687 /* PCIe -- adjust Maximum Read Request Size (2048). */ 687 /* PCIe -- adjust Maximum Read Request Size (2048). */
688 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) 688 if (pci_is_pcie(ha->pdev))
689 pcie_set_readrq(ha->pdev, 2048); 689 pcie_set_readrq(ha->pdev, 2048);
690 690
691 pci_disable_rom(ha->pdev); 691 pci_disable_rom(ha->pdev);
@@ -721,7 +721,7 @@ qla25xx_pci_config(scsi_qla_host_t *vha)
721 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 721 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
722 722
723 /* PCIe -- adjust Maximum Read Request Size (2048). */ 723 /* PCIe -- adjust Maximum Read Request Size (2048). */
724 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) 724 if (pci_is_pcie(ha->pdev))
725 pcie_set_readrq(ha->pdev, 2048); 725 pcie_set_readrq(ha->pdev, 2048);
726 726
727 pci_disable_rom(ha->pdev); 727 pci_disable_rom(ha->pdev);
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index caf627ba7fa8..9ce3a8f8754f 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1620,7 +1620,7 @@ qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1620 char lwstr[6]; 1620 char lwstr[6];
1621 uint16_t lnk; 1621 uint16_t lnk;
1622 1622
1623 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 1623 pcie_reg = pci_pcie_cap(ha->pdev);
1624 pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk); 1624 pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
1625 ha->link_width = (lnk >> 4) & 0x3f; 1625 ha->link_width = (lnk >> 4) & 0x3f;
1626 1626
@@ -2528,7 +2528,7 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
2528 } 2528 }
2529 2529
2530 /* Negotiated Link width */ 2530 /* Negotiated Link width */
2531 pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 2531 pcie_cap = pci_pcie_cap(ha->pdev);
2532 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); 2532 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2533 ha->link_width = (lnk >> 4) & 0x3f; 2533 ha->link_width = (lnk >> 4) & 0x3f;
2534 2534
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 6d1d873a20e2..fb8cd3847d4b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -482,12 +482,12 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
482 uint32_t pci_bus; 482 uint32_t pci_bus;
483 int pcie_reg; 483 int pcie_reg;
484 484
485 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 485 pcie_reg = pci_pcie_cap(ha->pdev);
486 if (pcie_reg) { 486 if (pcie_reg) {
487 char lwstr[6]; 487 char lwstr[6];
488 uint16_t pcie_lstat, lspeed, lwidth; 488 uint16_t pcie_lstat, lspeed, lwidth;
489 489
490 pcie_reg += 0x12; 490 pcie_reg += PCI_EXP_LNKCAP;
491 pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); 491 pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
492 lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); 492 lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
493 lwidth = (pcie_lstat & 493 lwidth = (pcie_lstat &
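[Editor's sketch] The qla2xxx hunks above replace repeated pci_find_capability(pdev, PCI_CAP_ID_EXP) scans with the cached pci_is_pcie()/pci_pcie_cap() helpers, and magic offsets with PCI_EXP_* symbols. A minimal sketch of the modern pattern for reading the negotiated link status:

#include <linux/pci.h>

static void example_read_link_status(struct pci_dev *pdev)
{
	int pos;
	u16 lnksta;

	if (!pci_is_pcie(pdev))
		return;

	pos = pci_pcie_cap(pdev);	/* cached capability offset */
	pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &lnksta);
	pr_info("negotiated link width x%u\n", (lnksta >> 4) & 0x3f);
}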
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 6986552b47e6..5b30132960c7 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2643,19 +2643,9 @@ static void qlt_do_work(struct work_struct *work)
2643 spin_lock_irqsave(&ha->hardware_lock, flags); 2643 spin_lock_irqsave(&ha->hardware_lock, flags);
2644 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 2644 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
2645 atio->u.isp24.fcp_hdr.s_id); 2645 atio->u.isp24.fcp_hdr.s_id);
2646 if (sess) { 2646 /* Do kref_get() before dropping qla_hw_data->hardware_lock. */
2647 if (unlikely(sess->tearing_down)) { 2647 if (sess)
2648 sess = NULL; 2648 kref_get(&sess->se_sess->sess_kref);
2649 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2650 goto out_term;
2651 } else {
2652 /*
2653 * Do the extra kref_get() before dropping
2654 * qla_hw_data->hardware_lock.
2655 */
2656 kref_get(&sess->se_sess->sess_kref);
2657 }
2658 }
2659 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2649 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2660 2650
2661 if (unlikely(!sess)) { 2651 if (unlikely(!sess)) {
@@ -3960,7 +3950,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
3960{ 3950{
3961 struct qla_hw_data *ha = vha->hw; 3951 struct qla_hw_data *ha = vha->hw;
3962 struct qla_tgt *tgt = ha->tgt.qla_tgt; 3952 struct qla_tgt *tgt = ha->tgt.qla_tgt;
3963 int reason_code; 3953 int login_code;
3964 3954
3965 ql_dbg(ql_dbg_tgt, vha, 0xe039, 3955 ql_dbg(ql_dbg_tgt, vha, 0xe039,
3966 "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n", 3956 "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
@@ -4003,9 +3993,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
4003 { 3993 {
4004 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, 3994 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
4005 "qla_target(%d): Async LOOP_UP occured " 3995 "qla_target(%d): Async LOOP_UP occured "
4006 "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, 3996 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
4007 le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), 3997 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4008 le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); 3998 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4009 if (tgt->link_reinit_iocb_pending) { 3999 if (tgt->link_reinit_iocb_pending) {
4010 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb, 4000 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
4011 0, 0, 0, 0, 0, 0); 4001 0, 0, 0, 0, 0, 0);
@@ -4020,23 +4010,24 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
4020 case MBA_RSCN_UPDATE: 4010 case MBA_RSCN_UPDATE:
4021 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, 4011 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
4022 "qla_target(%d): Async event %#x occured " 4012 "qla_target(%d): Async event %#x occured "
4023 "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code, 4013 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
4024 le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), 4014 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4025 le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); 4015 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4026 break; 4016 break;
4027 4017
4028 case MBA_PORT_UPDATE: 4018 case MBA_PORT_UPDATE:
4029 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, 4019 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
4030 "qla_target(%d): Port update async event %#x " 4020 "qla_target(%d): Port update async event %#x "
4031 "occured: updating the ports database (m[1]=%x, m[2]=%x, " 4021 "occured: updating the ports database (m[0]=%x, m[1]=%x, "
4032 "m[3]=%x, m[4]=%x)", vha->vp_idx, code, 4022 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
4033 le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), 4023 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4034 le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); 4024 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4035 reason_code = le16_to_cpu(mailbox[2]); 4025
4036 if (reason_code == 0x4) 4026 login_code = le16_to_cpu(mailbox[2]);
4027 if (login_code == 0x4)
4037 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, 4028 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
4038 "Async MB 2: Got PLOGI Complete\n"); 4029 "Async MB 2: Got PLOGI Complete\n");
4039 else if (reason_code == 0x7) 4030 else if (login_code == 0x7)
4040 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, 4031 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
4041 "Async MB 2: Port Logged Out\n"); 4032 "Async MB 2: Port Logged Out\n");
4042 break; 4033 break;
@@ -4044,9 +4035,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
4044 default: 4035 default:
4045 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040, 4036 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
4046 "qla_target(%d): Async event %#x occured: " 4037 "qla_target(%d): Async event %#x occured: "
4047 "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, 4038 "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
4048 code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), 4039 code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4049 le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); 4040 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4050 break; 4041 break;
4051 } 4042 }
4052 4043
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 9f9ef1644fd9..170af1571214 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -639,7 +639,7 @@ struct qla_tgt_func_tmpl {
639 639
640 int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, 640 int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
641 unsigned char *, uint32_t, int, int, int); 641 unsigned char *, uint32_t, int, int, int);
642 int (*handle_data)(struct qla_tgt_cmd *); 642 void (*handle_data)(struct qla_tgt_cmd *);
643 int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, 643 int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
644 uint32_t); 644 uint32_t);
645 void (*free_cmd)(struct qla_tgt_cmd *); 645 void (*free_cmd)(struct qla_tgt_cmd *);
@@ -813,7 +813,6 @@ struct qla_tgt_sess {
813 unsigned int conf_compl_supported:1; 813 unsigned int conf_compl_supported:1;
814 unsigned int deleted:1; 814 unsigned int deleted:1;
815 unsigned int local:1; 815 unsigned int local:1;
816 unsigned int tearing_down:1;
817 816
818 struct se_session *se_sess; 817 struct se_session *se_sess;
819 struct scsi_qla_host *vha; 818 struct scsi_qla_host *vha;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 6e64314dbbb3..4752f65a9272 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -38,8 +38,6 @@
38#include <linux/string.h> 38#include <linux/string.h>
39#include <linux/configfs.h> 39#include <linux/configfs.h>
40#include <linux/ctype.h> 40#include <linux/ctype.h>
41#include <linux/string.h>
42#include <linux/ctype.h>
43#include <asm/unaligned.h> 41#include <asm/unaligned.h>
44#include <scsi/scsi.h> 42#include <scsi/scsi.h>
45#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
@@ -466,8 +464,7 @@ static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
466 vha = sess->vha; 464 vha = sess->vha;
467 465
468 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 466 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
469 sess->tearing_down = 1; 467 target_sess_cmd_list_set_waiting(se_sess);
470 target_splice_sess_cmd_list(se_sess);
471 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 468 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
472 469
473 return 1; 470 return 1;
@@ -600,28 +597,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
600 return -EINVAL; 597 return -EINVAL;
601 } 598 }
602 599
603 target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], 600 return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
604 cmd->unpacked_lun, data_length, fcp_task_attr, 601 cmd->unpacked_lun, data_length, fcp_task_attr,
605 data_dir, flags); 602 data_dir, flags);
606 return 0;
607} 603}
608 604
609static void tcm_qla2xxx_do_rsp(struct work_struct *work) 605static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
610{ 606{
611 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 607 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
612 /*
613 * Dispatch ->queue_status from workqueue process context
614 */
615 transport_generic_request_failure(&cmd->se_cmd);
616}
617 608
618/*
619 * Called from qla_target.c:qlt_do_ctio_completion()
620 */
621static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
622{
623 struct se_cmd *se_cmd = &cmd->se_cmd;
624 unsigned long flags;
625 /* 609 /*
626 * Ensure that the complete FCP WRITE payload has been received. 610 * Ensure that the complete FCP WRITE payload has been received.
627 * Otherwise return an exception via CHECK_CONDITION status. 611 * Otherwise return an exception via CHECK_CONDITION status.
@@ -631,24 +615,26 @@ static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
631 * Check if se_cmd has already been aborted via LUN_RESET, and 615 * Check if se_cmd has already been aborted via LUN_RESET, and
632 * waiting upon completion in tcm_qla2xxx_write_pending_status() 616 * waiting upon completion in tcm_qla2xxx_write_pending_status()
633 */ 617 */
634 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 618 if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
635 if (se_cmd->transport_state & CMD_T_ABORTED) { 619 complete(&cmd->se_cmd.t_transport_stop_comp);
636 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 620 return;
637 complete(&se_cmd->t_transport_stop_comp);
638 return 0;
639 } 621 }
640 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
641 622
642 se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD; 623 cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
643 INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp); 624 transport_generic_request_failure(&cmd->se_cmd);
644 queue_work(tcm_qla2xxx_free_wq, &cmd->work); 625 return;
645 return 0;
646 } 626 }
647 /* 627
648 * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE 628 return target_execute_cmd(&cmd->se_cmd);
649 * status to the backstore processing thread. 629}
650 */ 630
651 return transport_generic_handle_data(&cmd->se_cmd); 631/*
632 * Called from qla_target.c:qlt_do_ctio_completion()
633 */
634static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
635{
636 INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
637 queue_work(tcm_qla2xxx_free_wq, &cmd->work);
652} 638}
653 639
654/* 640/*
@@ -1690,7 +1676,6 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
1690 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, 1676 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1691 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, 1677 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1692 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, 1678 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1693 .new_cmd_map = NULL,
1694 .check_stop_free = tcm_qla2xxx_check_stop_free, 1679 .check_stop_free = tcm_qla2xxx_check_stop_free,
1695 .release_cmd = tcm_qla2xxx_release_cmd, 1680 .release_cmd = tcm_qla2xxx_release_cmd,
1696 .put_session = tcm_qla2xxx_put_session, 1681 .put_session = tcm_qla2xxx_put_session,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 96a5616a8fda..7fdba7f1ffb7 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -279,6 +279,7 @@ struct qla_ddb_index {
279 struct list_head list; 279 struct list_head list;
280 uint16_t fw_ddb_idx; 280 uint16_t fw_ddb_idx;
281 struct dev_db_entry fw_ddb; 281 struct dev_db_entry fw_ddb;
282 uint8_t flash_isid[6];
282}; 283};
283 284
284#define DDB_IPADDR_LEN 64 285#define DDB_IPADDR_LEN 64
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 20b49d019043..5b2525c4139e 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -183,7 +183,8 @@ int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
183int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, 183int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
184 struct ddb_entry *ddb_entry, uint32_t state); 184 struct ddb_entry *ddb_entry, uint32_t state);
185void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset); 185void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
186int qla4xxx_post_aen_work(struct scsi_qla_host *ha, uint32_t aen_code, 186int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
187 enum iscsi_host_event_code aen_code,
187 uint32_t data_size, uint8_t *data); 188 uint32_t data_size, uint8_t *data);
188int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options, 189int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
189 uint32_t payload_size, uint32_t pid, uint8_t *ipaddr); 190 uint32_t payload_size, uint32_t pid, uint8_t *ipaddr);
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index bf36723b84e1..ddd9472066cb 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -126,7 +126,7 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
126 126
127 qla4xxx_init_response_q_entries(ha); 127 qla4xxx_init_response_q_entries(ha);
128 128
129 /* Initialize mabilbox active array */ 129 /* Initialize mailbox active array */
130 for (i = 0; i < MAX_MRB; i++) 130 for (i = 0; i < MAX_MRB; i++)
131 ha->active_mrb_array[i] = NULL; 131 ha->active_mrb_array[i] = NULL;
132 132
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 228b67020d2c..939d7261c37a 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1590,7 +1590,7 @@ qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
1590 } 1590 }
1591 1591
1592 /* Negotiated Link width */ 1592 /* Negotiated Link width */
1593 pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 1593 pcie_cap = pci_pcie_cap(ha->pdev);
1594 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); 1594 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
1595 ha->link_width = (lnk >> 4) & 0x3f; 1595 ha->link_width = (lnk >> 4) & 0x3f;
1596 1596
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index cd15678f9ada..9da426628b97 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4299,7 +4299,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4299} 4299}
4300 4300
4301static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, 4301static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4302 struct ql4_tuple_ddb *tddb) 4302 struct ql4_tuple_ddb *tddb,
4303 uint8_t *flash_isid)
4303{ 4304{
4304 uint16_t options = 0; 4305 uint16_t options = 0;
4305 4306
@@ -4314,7 +4315,12 @@ static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4314 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); 4315 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4315 4316
4316 tddb->port = le16_to_cpu(fw_ddb_entry->port); 4317 tddb->port = le16_to_cpu(fw_ddb_entry->port);
4317 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid)); 4318
4319 if (flash_isid == NULL)
4320 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
4321 sizeof(tddb->isid));
4322 else
4323 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
4318} 4324}
4319 4325
4320static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, 4326static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
@@ -4385,7 +4391,7 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4385 goto exit_check; 4391 goto exit_check;
4386 } 4392 }
4387 4393
4388 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb); 4394 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4389 4395
4390 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 4396 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4391 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 4397 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
@@ -4407,6 +4413,102 @@ exit_check:
4407 return ret; 4413 return ret;
4408} 4414}
4409 4415
4416/**
4417 * qla4xxx_check_existing_isid - check if a target with the same isid
4418 * exists in the target list
4419 * @list_nt: list of targets
4420 * @isid: isid to check
4421 *
4422 * This routine returns QLA_SUCCESS if a target with the same isid exists.
4423 **/
4424static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
4425{
4426 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4427 struct dev_db_entry *fw_ddb_entry;
4428
4429 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4430 fw_ddb_entry = &nt_ddb_idx->fw_ddb;
4431
4432 if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
4433 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
4434 return QLA_SUCCESS;
4435 }
4436 }
4437 return QLA_ERROR;
4438}
4439
4440/**
4441 * qla4xxx_update_isid - compare ddbs and update the isid
4442 * @ha: Pointer to host adapter structure.
4443 * @list_nt: list of nt targets
4444 * @fw_ddb_entry: firmware ddb entry
4445 *
4446 * This routine updates the isid if ddbs have the same iqn, the same
4447 * isid and a different IP addr.
4448 * Returns QLA_SUCCESS if the isid is updated.
4449 **/
4450static int qla4xxx_update_isid(struct scsi_qla_host *ha,
4451 struct list_head *list_nt,
4452 struct dev_db_entry *fw_ddb_entry)
4453{
4454 uint8_t base_value, i;
4455
4456 base_value = fw_ddb_entry->isid[1] & 0x1f;
4457 for (i = 0; i < 8; i++) {
4458 fw_ddb_entry->isid[1] = (base_value | (i << 5));
4459 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4460 break;
4461 }
4462
4463 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4464 return QLA_ERROR;
4465
4466 return QLA_SUCCESS;
4467}
4468
4469/**
4470 * qla4xxx_should_update_isid - check if the isid needs updating
4471 * @ha: Pointer to host adapter structure.
4472 * @old_tddb: ddb tuple
4473 * @new_tddb: ddb tuple
4474 *
4475 * Returns QLA_SUCCESS if the IP or port differs while the iqn and
4476 * isid match.
4477 **/
4478static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
4479 struct ql4_tuple_ddb *old_tddb,
4480 struct ql4_tuple_ddb *new_tddb)
4481{
4482 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
4483 /* Same ip */
4484 if (old_tddb->port == new_tddb->port)
4485 return QLA_ERROR;
4486 }
4487
4488 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4489 /* different iqn */
4490 return QLA_ERROR;
4491
4492 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4493 sizeof(old_tddb->isid)))
4494 /* different isid */
4495 return QLA_ERROR;
4496
4497 return QLA_SUCCESS;
4498}
4499
4500/**
4501 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
4502 * @ha: Pointer to host adapter structure.
4503 * @list_nt: list of nt target.
4504 * @fw_ddb_entry: firmware ddb entry.
4505 *
4506 * This routine checks if fw_ddb_entry already exists in list_nt to avoid
4507 * a duplicate ddb in list_nt.
4508 * Returns QLA_SUCCESS if a duplicate ddb exists in list_nt.
4509 * Note: This function also updates the isid of the DDB if required.
4510 **/
4511
4410static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, 4512static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
4411 struct list_head *list_nt, 4513 struct list_head *list_nt,
4412 struct dev_db_entry *fw_ddb_entry) 4514 struct dev_db_entry *fw_ddb_entry)
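[Editor's sketch] The isid-update walk added above operates on byte 1 of the 6-byte ISID: the low 5 bits are kept as a base and the top 3 bits are cycled through all 8 values until an unused combination is found. A small standalone sketch of that search (in_use stands in for qla4xxx_check_existing_isid):

#include <linux/types.h>

static int example_pick_isid_byte1(u8 cur, bool (*in_use)(u8 candidate))
{
	u8 base = cur & 0x1f;	/* keep the low 5 bits */
	int i;

	for (i = 0; i < 8; i++) {
		u8 candidate = base | (i << 5);	/* cycle the top 3 bits */

		if (!in_use(candidate))
			return candidate;	/* free variant found */
	}
	return -1;	/* all 8 variants are taken */
}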
@@ -4414,7 +4516,7 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
4414 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 4516 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4415 struct ql4_tuple_ddb *fw_tddb = NULL; 4517 struct ql4_tuple_ddb *fw_tddb = NULL;
4416 struct ql4_tuple_ddb *tmp_tddb = NULL; 4518 struct ql4_tuple_ddb *tmp_tddb = NULL;
4417 int ret = QLA_ERROR; 4519 int rval, ret = QLA_ERROR;
4418 4520
4419 fw_tddb = vzalloc(sizeof(*fw_tddb)); 4521 fw_tddb = vzalloc(sizeof(*fw_tddb));
4420 if (!fw_tddb) { 4522 if (!fw_tddb) {
@@ -4432,12 +4534,28 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
4432 goto exit_check; 4534 goto exit_check;
4433 } 4535 }
4434 4536
4435 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb); 4537 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4436 4538
4437 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 4539 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4438 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb); 4540 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
4439 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) { 4541 nt_ddb_idx->flash_isid);
4440 ret = QLA_SUCCESS; /* found */ 4542 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
4543 /* found duplicate ddb */
4544 if (ret == QLA_SUCCESS)
4545 goto exit_check;
4546 }
4547
4548 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4549 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
4550
4551 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
4552 if (ret == QLA_SUCCESS) {
4553 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
4554 if (rval == QLA_SUCCESS)
4555 ret = QLA_ERROR;
4556 else
4557 ret = QLA_SUCCESS;
4558
4441 goto exit_check; 4559 goto exit_check;
4442 } 4560 }
4443 } 4561 }
@@ -4788,14 +4906,26 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
4788 4906
4789 nt_ddb_idx->fw_ddb_idx = idx; 4907 nt_ddb_idx->fw_ddb_idx = idx;
4790 4908
4791 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, 4909 /* Copy original isid as it may get updated in function
4792 sizeof(struct dev_db_entry)); 4910 * qla4xxx_update_isid(). We need original isid in
4793 4911 * function qla4xxx_compare_tuple_ddb to find duplicate
4794 if (qla4xxx_is_flash_ddb_exists(ha, list_nt, 4912 * target */
4795 fw_ddb_entry) == QLA_SUCCESS) { 4913 memcpy(&nt_ddb_idx->flash_isid[0],
4914 &fw_ddb_entry->isid[0],
4915 sizeof(nt_ddb_idx->flash_isid));
4916
4917 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
4918 fw_ddb_entry);
4919 if (ret == QLA_SUCCESS) {
4920 /* free nt_ddb_idx and do not add to list_nt */
4796 vfree(nt_ddb_idx); 4921 vfree(nt_ddb_idx);
4797 goto continue_next_nt; 4922 goto continue_next_nt;
4798 } 4923 }
4924
4925 /* Copy updated isid */
4926 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4927 sizeof(struct dev_db_entry));
4928
4799 list_add_tail(&nt_ddb_idx->list, list_nt); 4929 list_add_tail(&nt_ddb_idx->list, list_nt);
4800 } else if (is_reset == RESET_ADAPTER) { 4930 } else if (is_reset == RESET_ADAPTER) {
4801 if (qla4xxx_is_session_exists(ha, fw_ddb_entry) == 4931 if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index cc1cc3518b87..725034f4252c 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k17" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k18"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index bbbc9c918d4c..2936b447cae9 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -54,6 +54,7 @@
54#include <linux/notifier.h> 54#include <linux/notifier.h>
55#include <linux/cpu.h> 55#include <linux/cpu.h>
56#include <linux/mutex.h> 56#include <linux/mutex.h>
57#include <linux/async.h>
57 58
58#include <scsi/scsi.h> 59#include <scsi/scsi.h>
59#include <scsi/scsi_cmnd.h> 60#include <scsi/scsi_cmnd.h>
@@ -91,7 +92,7 @@ EXPORT_SYMBOL(scsi_logging_level);
91#endif 92#endif
92 93
93/* sd, scsi core and power management need to coordinate flushing async actions */ 94/* sd, scsi core and power management need to coordinate flushing async actions */
94LIST_HEAD(scsi_sd_probe_domain); 95ASYNC_DOMAIN(scsi_sd_probe_domain);
95EXPORT_SYMBOL(scsi_sd_probe_domain); 96EXPORT_SYMBOL(scsi_sd_probe_domain);
96 97
97/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 98/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
@@ -1354,6 +1355,7 @@ static void __exit exit_scsi(void)
1354 scsi_exit_devinfo(); 1355 scsi_exit_devinfo();
1355 scsi_exit_procfs(); 1356 scsi_exit_procfs();
1356 scsi_exit_queue(); 1357 scsi_exit_queue();
1358 async_unregister_domain(&scsi_sd_probe_domain);
1357} 1359}
1358 1360
1359subsys_initcall(init_scsi); 1361subsys_initcall(init_scsi);
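ASYNC_DOMAIN() turns the old plain list head into a registered async domain, and exit_scsi() now unregisters it so the core async code stops tracking it at module teardown. A minimal sketch of the 3.5/3.6-era API this relies on; the my_-prefixed names are hypothetical:

#include <linux/async.h>
#include <linux/module.h>

/* A private async domain, flushed and torn down explicitly. */
static ASYNC_DOMAIN(my_probe_domain);

static void my_probe_one(void *data, async_cookie_t cookie)
{
	/* slow probing work runs here, off the caller's context */
}

static int __init my_init(void)
{
	async_schedule_domain(my_probe_one, NULL, &my_probe_domain);
	return 0;
}

static void __exit my_exit(void)
{
	/* Wait for outstanding work, then unlink the domain so the
	 * async core no longer walks it. */
	async_synchronize_full_domain(&my_probe_domain);
	async_unregister_domain(&my_probe_domain);
}
module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");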
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d0f71e5d065f..4a6381c87253 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1687,6 +1687,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
1687 * requests are started. 1687 * requests are started.
1688 */ 1688 */
1689 scsi_run_host_queues(shost); 1689 scsi_run_host_queues(shost);
1690
1691 /*
1692 * If eh is active and host_eh_scheduled is pending we need to re-run
1693 * recovery. We do this check after scsi_run_host_queues() to allow
1694 * everything pent up since the last eh run a chance to make forward
1695 * progress before we sync again. Either we'll immediately re-run
1696 * recovery or scsi_device_unbusy() will wake us again when these
1697 * pending commands complete.
1698 */
1699 spin_lock_irqsave(shost->host_lock, flags);
1700 if (shost->host_eh_scheduled)
1701 if (scsi_host_set_state(shost, SHOST_RECOVERY))
1702 WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
1703 spin_unlock_irqrestore(shost->host_lock, flags);
1690} 1704}
1691 1705
1692/** 1706/**
@@ -1804,15 +1818,14 @@ int scsi_error_handler(void *data)
1804 * We never actually get interrupted because kthread_run 1818 * We never actually get interrupted because kthread_run
1805 * disables signal delivery for the created thread. 1819 * disables signal delivery for the created thread.
1806 */ 1820 */
1807 set_current_state(TASK_INTERRUPTIBLE);
1808 while (!kthread_should_stop()) { 1821 while (!kthread_should_stop()) {
1822 set_current_state(TASK_INTERRUPTIBLE);
1809 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || 1823 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
1810 shost->host_failed != shost->host_busy) { 1824 shost->host_failed != shost->host_busy) {
1811 SCSI_LOG_ERROR_RECOVERY(1, 1825 SCSI_LOG_ERROR_RECOVERY(1,
1812 printk("Error handler scsi_eh_%d sleeping\n", 1826 printk("Error handler scsi_eh_%d sleeping\n",
1813 shost->host_no)); 1827 shost->host_no));
1814 schedule(); 1828 schedule();
1815 set_current_state(TASK_INTERRUPTIBLE);
1816 continue; 1829 continue;
1817 } 1830 }
1818 1831
@@ -1849,7 +1862,6 @@ int scsi_error_handler(void *data)
1849 scsi_restart_operations(shost); 1862 scsi_restart_operations(shost);
1850 if (!shost->eh_noresume) 1863 if (!shost->eh_noresume)
1851 scsi_autopm_put_host(shost); 1864 scsi_autopm_put_host(shost);
1852 set_current_state(TASK_INTERRUPTIBLE);
1853 } 1865 }
1854 __set_current_state(TASK_RUNNING); 1866 __set_current_state(TASK_RUNNING);
1855 1867
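Moving set_current_state(TASK_INTERRUPTIBLE) to the top of the loop is the canonical fix for a lost wakeup: the task state must be set before the sleep condition is tested, so a wakeup landing between the test and schedule() leaves the thread runnable instead of asleep forever. A condensed sketch of the pattern; work_is_pending() and do_pending_work() are hypothetical placeholders:

#include <linux/kthread.h>
#include <linux/sched.h>

static bool work_is_pending(void);	/* hypothetical predicate */
static void do_pending_work(void);	/* hypothetical worker */

static int my_thread(void *unused)
{
	while (!kthread_should_stop()) {
		/* Set the state BEFORE testing the condition; a waker
		 * that fires between the test and schedule() then
		 * leaves us TASK_RUNNING and schedule() returns at
		 * once instead of sleeping. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!work_is_pending()) {
			schedule();
			continue;
		}
		__set_current_state(TASK_RUNNING);
		do_pending_work();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}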
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 08f1e297c735..ffd77739ae3e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -126,7 +126,7 @@ static void scsi_unprep_request(struct request *req)
126 * for a requeue after completion, which should only occur in this 126 * for a requeue after completion, which should only occur in this
127 * file. 127 * file.
128 */ 128 */
129static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) 129static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
130{ 130{
131 struct Scsi_Host *host = cmd->device->host; 131 struct Scsi_Host *host = cmd->device->host;
132 struct scsi_device *device = cmd->device; 132 struct scsi_device *device = cmd->device;
@@ -172,15 +172,14 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
172 172
173 /* 173 /*
174 * Requeue this command. It will go before all other commands 174 * Requeue this command. It will go before all other commands
175 * that are already in the queue. 175 * that are already in the queue. Schedule requeue work under
176 * lock such that the kblockd_schedule_work() call happens
177 * before blk_cleanup_queue() finishes.
176 */ 178 */
177 spin_lock_irqsave(q->queue_lock, flags); 179 spin_lock_irqsave(q->queue_lock, flags);
178 blk_requeue_request(q, cmd->request); 180 blk_requeue_request(q, cmd->request);
179 spin_unlock_irqrestore(q->queue_lock, flags);
180
181 kblockd_schedule_work(q, &device->requeue_work); 181 kblockd_schedule_work(q, &device->requeue_work);
182 182 spin_unlock_irqrestore(q->queue_lock, flags);
183 return 0;
184} 183}
185 184
186/* 185/*
@@ -202,9 +201,9 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
202 * Notes: This could be called either from an interrupt context or a 201 * Notes: This could be called either from an interrupt context or a
203 * normal process context. 202 * normal process context.
204 */ 203 */
205int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 204void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
206{ 205{
207 return __scsi_queue_insert(cmd, reason, 1); 206 __scsi_queue_insert(cmd, reason, 1);
208} 207}
209/** 208/**
210 * scsi_execute - insert request and wait for the result 209 * scsi_execute - insert request and wait for the result
@@ -423,10 +422,6 @@ static void scsi_run_queue(struct request_queue *q)
423 LIST_HEAD(starved_list); 422 LIST_HEAD(starved_list);
424 unsigned long flags; 423 unsigned long flags;
425 424
426 /* if the device is dead, sdev will be NULL, so no queue to run */
427 if (!sdev)
428 return;
429
430 shost = sdev->host; 425 shost = sdev->host;
431 if (scsi_target(sdev)->single_lun) 426 if (scsi_target(sdev)->single_lun)
432 scsi_single_lun_run(sdev); 427 scsi_single_lun_run(sdev);
@@ -500,15 +495,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
500 */ 495 */
501static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) 496static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
502{ 497{
498 struct scsi_device *sdev = cmd->device;
503 struct request *req = cmd->request; 499 struct request *req = cmd->request;
504 unsigned long flags; 500 unsigned long flags;
505 501
502 /*
503 * We need to hold a reference on the device to avoid the queue being
504 * killed after the unlock and before scsi_run_queue() is invoked,
505 * which may happen because scsi_unprep_request() puts the command,
506 * releasing its reference on the device.
507 */
508 get_device(&sdev->sdev_gendev);
509
506 spin_lock_irqsave(q->queue_lock, flags); 510 spin_lock_irqsave(q->queue_lock, flags);
507 scsi_unprep_request(req); 511 scsi_unprep_request(req);
508 blk_requeue_request(q, req); 512 blk_requeue_request(q, req);
509 spin_unlock_irqrestore(q->queue_lock, flags); 513 spin_unlock_irqrestore(q->queue_lock, flags);
510 514
511 scsi_run_queue(q); 515 scsi_run_queue(q);
516
517 put_device(&sdev->sdev_gendev);
512} 518}
513 519
514void scsi_next_command(struct scsi_cmnd *cmd) 520void scsi_next_command(struct scsi_cmnd *cmd)
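The get_device()/put_device() pair pins the scsi_device across the window where the queue lock is dropped, since scsi_unprep_request() can put the command and with it the last reference on the device. A condensed sketch of the patched scsi_requeue_command() showing the pin-across-unlock idiom:

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

static void requeue_and_run(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	unsigned long flags;

	get_device(&sdev->sdev_gendev);		/* pin before the command is put */

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);	/* may drop cmd's device ref */
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);			/* safe: sdev still pinned */
	put_device(&sdev->sdev_gendev);
}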
@@ -1190,6 +1196,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1190 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { 1196 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1191 switch (sdev->sdev_state) { 1197 switch (sdev->sdev_state) {
1192 case SDEV_OFFLINE: 1198 case SDEV_OFFLINE:
1199 case SDEV_TRANSPORT_OFFLINE:
1193 /* 1200 /*
1194 * If the device is offline we refuse to process any 1201 * If the device is offline we refuse to process any
1195 * commands. The device must be brought online 1202 * commands. The device must be brought online
@@ -1387,16 +1394,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1387 * may be changed after request stacking drivers call the function, 1394 * may be changed after request stacking drivers call the function,
1388 * regardless of taking lock or not. 1395 * regardless of taking lock or not.
1389 * 1396 *
1390 * When scsi can't dispatch I/Os anymore and needs to kill I/Os 1397 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
1391 * (e.g. !sdev), scsi needs to return 'not busy'. 1398 * needs to return 'not busy'. Otherwise, request stacking drivers
1392 * Otherwise, request stacking drivers may hold requests forever. 1399 * may hold requests forever.
1393 */ 1400 */
1394static int scsi_lld_busy(struct request_queue *q) 1401static int scsi_lld_busy(struct request_queue *q)
1395{ 1402{
1396 struct scsi_device *sdev = q->queuedata; 1403 struct scsi_device *sdev = q->queuedata;
1397 struct Scsi_Host *shost; 1404 struct Scsi_Host *shost;
1398 1405
1399 if (!sdev) 1406 if (blk_queue_dead(q))
1400 return 0; 1407 return 0;
1401 1408
1402 shost = sdev->host; 1409 shost = sdev->host;
@@ -1507,12 +1514,6 @@ static void scsi_request_fn(struct request_queue *q)
1507 struct scsi_cmnd *cmd; 1514 struct scsi_cmnd *cmd;
1508 struct request *req; 1515 struct request *req;
1509 1516
1510 if (!sdev) {
1511 while ((req = blk_peek_request(q)) != NULL)
1512 scsi_kill_request(req, q);
1513 return;
1514 }
1515
1516 if(!get_device(&sdev->sdev_gendev)) 1517 if(!get_device(&sdev->sdev_gendev))
1517 /* We must be tearing the block queue down already */ 1518 /* We must be tearing the block queue down already */
1518 return; 1519 return;
@@ -1714,20 +1715,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1714 return q; 1715 return q;
1715} 1716}
1716 1717
1717void scsi_free_queue(struct request_queue *q)
1718{
1719 unsigned long flags;
1720
1721 WARN_ON(q->queuedata);
1722
1723 /* cause scsi_request_fn() to kill all non-finished requests */
1724 spin_lock_irqsave(q->queue_lock, flags);
1725 q->request_fn(q);
1726 spin_unlock_irqrestore(q->queue_lock, flags);
1727
1728 blk_cleanup_queue(q);
1729}
1730
1731/* 1718/*
1732 * Function: scsi_block_requests() 1719 * Function: scsi_block_requests()
1733 * 1720 *
@@ -2098,6 +2085,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2098 switch (oldstate) { 2085 switch (oldstate) {
2099 case SDEV_CREATED: 2086 case SDEV_CREATED:
2100 case SDEV_OFFLINE: 2087 case SDEV_OFFLINE:
2088 case SDEV_TRANSPORT_OFFLINE:
2101 case SDEV_QUIESCE: 2089 case SDEV_QUIESCE:
2102 case SDEV_BLOCK: 2090 case SDEV_BLOCK:
2103 break; 2091 break;
@@ -2110,6 +2098,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2110 switch (oldstate) { 2098 switch (oldstate) {
2111 case SDEV_RUNNING: 2099 case SDEV_RUNNING:
2112 case SDEV_OFFLINE: 2100 case SDEV_OFFLINE:
2101 case SDEV_TRANSPORT_OFFLINE:
2113 break; 2102 break;
2114 default: 2103 default:
2115 goto illegal; 2104 goto illegal;
@@ -2117,6 +2106,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2117 break; 2106 break;
2118 2107
2119 case SDEV_OFFLINE: 2108 case SDEV_OFFLINE:
2109 case SDEV_TRANSPORT_OFFLINE:
2120 switch (oldstate) { 2110 switch (oldstate) {
2121 case SDEV_CREATED: 2111 case SDEV_CREATED:
2122 case SDEV_RUNNING: 2112 case SDEV_RUNNING:
@@ -2153,6 +2143,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2153 case SDEV_RUNNING: 2143 case SDEV_RUNNING:
2154 case SDEV_QUIESCE: 2144 case SDEV_QUIESCE:
2155 case SDEV_OFFLINE: 2145 case SDEV_OFFLINE:
2146 case SDEV_TRANSPORT_OFFLINE:
2156 case SDEV_BLOCK: 2147 case SDEV_BLOCK:
2157 break; 2148 break;
2158 default: 2149 default:
@@ -2165,6 +2156,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2165 case SDEV_CREATED: 2156 case SDEV_CREATED:
2166 case SDEV_RUNNING: 2157 case SDEV_RUNNING:
2167 case SDEV_OFFLINE: 2158 case SDEV_OFFLINE:
2159 case SDEV_TRANSPORT_OFFLINE:
2168 case SDEV_CANCEL: 2160 case SDEV_CANCEL:
2169 break; 2161 break;
2170 default: 2162 default:
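The new SDEV_TRANSPORT_OFFLINE state is threaded through scsi_device_set_state() as a sibling of SDEV_OFFLINE: every transition that accepted plain offline now accepts transport-offline too. An abridged sketch of the legality check, listing only the predecessors visible in the hunks above, not the full table:

#include <linux/types.h>
#include <scsi/scsi_device.h>

static bool may_enter_offline(enum scsi_device_state oldstate)
{
	switch (oldstate) {
	case SDEV_CREATED:
	case SDEV_RUNNING:
		/* further legal predecessors elided */
		return true;
	default:
		return false;
	}
}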
@@ -2422,7 +2414,6 @@ EXPORT_SYMBOL(scsi_target_resume);
2422 * (which must be a legal transition). When the device is in this 2414 * (which must be a legal transition). When the device is in this
2423 * state, all commands are deferred until the scsi lld reenables 2415 * state, all commands are deferred until the scsi lld reenables
2424 * the device with scsi_device_unblock or device_block_tmo fires. 2416 * the device with scsi_device_unblock or device_block_tmo fires.
2425 * This routine assumes the host_lock is held on entry.
2426 */ 2417 */
2427int 2418int
2428scsi_internal_device_block(struct scsi_device *sdev) 2419scsi_internal_device_block(struct scsi_device *sdev)
@@ -2455,6 +2446,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2455/** 2446/**
2456 * scsi_internal_device_unblock - resume a device after a block request 2447 * scsi_internal_device_unblock - resume a device after a block request
2457 * @sdev: device to resume 2448 * @sdev: device to resume
2449 * @new_state: state to set devices to after unblocking
2458 * 2450 *
2459 * Called by scsi lld's or the midlayer to restart the device queue 2451 * Called by scsi lld's or the midlayer to restart the device queue
2460 * for the previously suspended scsi device. Called from interrupt or 2452 * for the previously suspended scsi device. Called from interrupt or
@@ -2464,25 +2456,29 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2464 * 2456 *
2465 * Notes: 2457 * Notes:
2466 * This routine transitions the device to the SDEV_RUNNING state 2458 * This routine transitions the device to the SDEV_RUNNING state
2467 * (which must be a legal transition) allowing the midlayer to 2459 * or to one of the offline states (which must be a legal transition)
2468 * goose the queue for this device. This routine assumes the 2460 * allowing the midlayer to goose the queue for this device.
2469 * host_lock is held upon entry.
2470 */ 2461 */
2471int 2462int
2472scsi_internal_device_unblock(struct scsi_device *sdev) 2463scsi_internal_device_unblock(struct scsi_device *sdev,
2464 enum scsi_device_state new_state)
2473{ 2465{
2474 struct request_queue *q = sdev->request_queue; 2466 struct request_queue *q = sdev->request_queue;
2475 unsigned long flags; 2467 unsigned long flags;
2476 2468
2477 /* 2469 /*
2478 * Try to transition the scsi device to SDEV_RUNNING 2470 * Try to transition the scsi device to SDEV_RUNNING or one of the
2479 * and goose the device queue if successful. 2471 * offlined states and goose the device queue if successful.
2480 */ 2472 */
2481 if (sdev->sdev_state == SDEV_BLOCK) 2473 if (sdev->sdev_state == SDEV_BLOCK)
2482 sdev->sdev_state = SDEV_RUNNING; 2474 sdev->sdev_state = new_state;
2483 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) 2475 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
2484 sdev->sdev_state = SDEV_CREATED; 2476 if (new_state == SDEV_TRANSPORT_OFFLINE ||
2485 else if (sdev->sdev_state != SDEV_CANCEL && 2477 new_state == SDEV_OFFLINE)
2478 sdev->sdev_state = new_state;
2479 else
2480 sdev->sdev_state = SDEV_CREATED;
2481 } else if (sdev->sdev_state != SDEV_CANCEL &&
2486 sdev->sdev_state != SDEV_OFFLINE) 2482 sdev->sdev_state != SDEV_OFFLINE)
2487 return -EINVAL; 2483 return -EINVAL;
2488 2484
@@ -2523,26 +2519,26 @@ EXPORT_SYMBOL_GPL(scsi_target_block);
2523static void 2519static void
2524device_unblock(struct scsi_device *sdev, void *data) 2520device_unblock(struct scsi_device *sdev, void *data)
2525{ 2521{
2526 scsi_internal_device_unblock(sdev); 2522 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2527} 2523}
2528 2524
2529static int 2525static int
2530target_unblock(struct device *dev, void *data) 2526target_unblock(struct device *dev, void *data)
2531{ 2527{
2532 if (scsi_is_target_device(dev)) 2528 if (scsi_is_target_device(dev))
2533 starget_for_each_device(to_scsi_target(dev), NULL, 2529 starget_for_each_device(to_scsi_target(dev), data,
2534 device_unblock); 2530 device_unblock);
2535 return 0; 2531 return 0;
2536} 2532}
2537 2533
2538void 2534void
2539scsi_target_unblock(struct device *dev) 2535scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
2540{ 2536{
2541 if (scsi_is_target_device(dev)) 2537 if (scsi_is_target_device(dev))
2542 starget_for_each_device(to_scsi_target(dev), NULL, 2538 starget_for_each_device(to_scsi_target(dev), &new_state,
2543 device_unblock); 2539 device_unblock);
2544 else 2540 else
2545 device_for_each_child(dev, NULL, target_unblock); 2541 device_for_each_child(dev, &new_state, target_unblock);
2546} 2542}
2547EXPORT_SYMBOL_GPL(scsi_target_unblock); 2543EXPORT_SYMBOL_GPL(scsi_target_unblock);
2548 2544
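scsi_target_unblock() now threads the requested device state through the void *data argument of the device iterators instead of passing NULL. A small sketch of that pointer-threading idiom around starget_for_each_device(); note that scsi_internal_device_unblock() is mid-layer private, so treat this as illustrative:

#include <scsi/scsi_device.h>

static void device_unblock_cb(struct scsi_device *sdev, void *data)
{
	/* Unpack the state that travelled through the void pointer. */
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static void unblock_target(struct scsi_target *starget,
			   enum scsi_device_state new_state)
{
	starget_for_each_device(starget, &new_state, device_unblock_cb);
}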
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index c77628afbf9f..8818dd681c19 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -486,6 +486,10 @@ void
486scsi_netlink_init(void) 486scsi_netlink_init(void)
487{ 487{
488 int error; 488 int error;
489 struct netlink_kernel_cfg cfg = {
490 .input = scsi_nl_rcv_msg,
491 .groups = SCSI_NL_GRP_CNT,
492 };
489 493
490 INIT_LIST_HEAD(&scsi_nl_drivers); 494 INIT_LIST_HEAD(&scsi_nl_drivers);
491 495
@@ -497,8 +501,7 @@ scsi_netlink_init(void)
497 } 501 }
498 502
499 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, 503 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
500 SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL, 504 THIS_MODULE, &cfg);
501 THIS_MODULE);
502 if (!scsi_nl_sock) { 505 if (!scsi_nl_sock) {
503 printk(KERN_ERR "%s: register of receive handler failed\n", 506 printk(KERN_ERR "%s: register of receive handler failed\n",
504 __func__); 507 __func__);
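netlink_kernel_create() lost its per-socket parameters in this kernel generation; the group count and input callback now travel in struct netlink_kernel_cfg. A minimal sketch of the calling convention as it stood at the time of this merge (the module argument was folded into the cfg later); my_rcv_msg and the NETLINK_USERSOCK unit are illustrative:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

static struct sock *nls;

static void my_rcv_msg(struct sk_buff *skb)
{
	/* hypothetical receive handler */
}

static int __init my_netlink_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 1,
		.input	= my_rcv_msg,
	};

	nls = netlink_kernel_create(&init_net, NETLINK_USERSOCK,
				    THIS_MODULE, &cfg);
	return nls ? 0 : -ENOBUFS;
}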
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d4201ded3b22..dc0ad85853e2 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -76,23 +76,24 @@ static int scsi_bus_resume_common(struct device *dev)
76{ 76{
77 int err = 0; 77 int err = 0;
78 78
79 if (scsi_is_sdev_device(dev)) { 79 /*
80 /* 80 * Parent device may have runtime suspended as soon as
81 * Parent device may have runtime suspended as soon as 81 * it is woken up during the system resume.
82 * it is woken up during the system resume. 82 *
83 * 83 * Resume it on behalf of child.
84 * Resume it on behalf of child. 84 */
85 */ 85 pm_runtime_get_sync(dev->parent);
86 pm_runtime_get_sync(dev->parent);
87 err = scsi_dev_type_resume(dev);
88 pm_runtime_put_sync(dev->parent);
89 }
90 86
87 if (scsi_is_sdev_device(dev))
88 err = scsi_dev_type_resume(dev);
91 if (err == 0) { 89 if (err == 0) {
92 pm_runtime_disable(dev); 90 pm_runtime_disable(dev);
93 pm_runtime_set_active(dev); 91 pm_runtime_set_active(dev);
94 pm_runtime_enable(dev); 92 pm_runtime_enable(dev);
95 } 93 }
94
95 pm_runtime_put_sync(dev->parent);
96
96 return err; 97 return err;
97} 98}
98 99
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 07ce3f51701d..8f9a0cadc296 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -2,6 +2,8 @@
2#define _SCSI_PRIV_H 2#define _SCSI_PRIV_H
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/async.h>
6#include <scsi/scsi_device.h>
5 7
6struct request_queue; 8struct request_queue;
7struct request; 9struct request;
@@ -79,12 +81,11 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd);
79/* scsi_lib.c */ 81/* scsi_lib.c */
80extern int scsi_maybe_unblock_host(struct scsi_device *sdev); 82extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
81extern void scsi_device_unbusy(struct scsi_device *sdev); 83extern void scsi_device_unbusy(struct scsi_device *sdev);
82extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason); 84extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
83extern void scsi_next_command(struct scsi_cmnd *cmd); 85extern void scsi_next_command(struct scsi_cmnd *cmd);
84extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); 86extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
85extern void scsi_run_host_queues(struct Scsi_Host *shost); 87extern void scsi_run_host_queues(struct Scsi_Host *shost);
86extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev); 88extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
87extern void scsi_free_queue(struct request_queue *q);
88extern int scsi_init_queue(void); 89extern int scsi_init_queue(void);
89extern void scsi_exit_queue(void); 90extern void scsi_exit_queue(void);
90struct request_queue; 91struct request_queue;
@@ -163,7 +164,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
163static inline void scsi_autopm_put_host(struct Scsi_Host *h) {} 164static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
164#endif /* CONFIG_PM_RUNTIME */ 165#endif /* CONFIG_PM_RUNTIME */
165 166
166extern struct list_head scsi_sd_probe_domain; 167extern struct async_domain scsi_sd_probe_domain;
167 168
168/* 169/*
169 * internal scsi timeout functions: for use by mid-layer and transport 170 * internal scsi timeout functions: for use by mid-layer and transport
@@ -172,6 +173,7 @@ extern struct list_head scsi_sd_probe_domain;
172 173
173#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */ 174#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
174extern int scsi_internal_device_block(struct scsi_device *sdev); 175extern int scsi_internal_device_block(struct scsi_device *sdev);
175extern int scsi_internal_device_unblock(struct scsi_device *sdev); 176extern int scsi_internal_device_unblock(struct scsi_device *sdev,
177 enum scsi_device_state new_state);
176 178
177#endif /* _SCSI_PRIV_H */ 179#endif /* _SCSI_PRIV_H */
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 2e5fe584aad3..56a93794c470 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
147 147
148 do { 148 do {
149 if (list_empty(&scanning_hosts)) 149 if (list_empty(&scanning_hosts))
150 goto out; 150 return 0;
151 /* If we can't get memory immediately, that's OK. Just 151 /* If we can't get memory immediately, that's OK. Just
152 * sleep a little. Even if we never get memory, the async 152 * sleep a little. Even if we never get memory, the async
153 * scans will finish eventually. 153 * scans will finish eventually.
@@ -179,26 +179,11 @@ int scsi_complete_async_scans(void)
179 } 179 }
180 done: 180 done:
181 spin_unlock(&async_scan_lock); 181 spin_unlock(&async_scan_lock);
182 kfree(data);
183
184 out:
185 async_synchronize_full_domain(&scsi_sd_probe_domain);
186 182
183 kfree(data);
187 return 0; 184 return 0;
188} 185}
189 186
190/* Only exported for the benefit of scsi_wait_scan */
191EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
192
193#ifndef MODULE
194/*
195 * For async scanning we need to wait for all the scans to complete before
196 * trying to mount the root fs. Otherwise non-modular drivers may not be ready
197 * yet.
198 */
199late_initcall(scsi_complete_async_scans);
200#endif
201
202/** 187/**
203 * scsi_unlock_floptical - unlock device via a special MODE SENSE command 188 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
204 * @sdev: scsi device to send command to 189 * @sdev: scsi device to send command to
@@ -1717,6 +1702,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1717{ 1702{
1718 struct scsi_device *sdev; 1703 struct scsi_device *sdev;
1719 shost_for_each_device(sdev, shost) { 1704 shost_for_each_device(sdev, shost) {
1705 /* target removed before the device could be added */
1706 if (sdev->sdev_state == SDEV_DEL)
1707 continue;
1720 if (!scsi_host_scan_allowed(shost) || 1708 if (!scsi_host_scan_allowed(shost) ||
1721 scsi_sysfs_add_sdev(sdev) != 0) 1709 scsi_sysfs_add_sdev(sdev) != 0)
1722 __scsi_remove_device(sdev); 1710 __scsi_remove_device(sdev);
@@ -1842,14 +1830,13 @@ static void do_scsi_scan_host(struct Scsi_Host *shost)
1842 } 1830 }
1843} 1831}
1844 1832
1845static int do_scan_async(void *_data) 1833static void do_scan_async(void *_data, async_cookie_t c)
1846{ 1834{
1847 struct async_scan_data *data = _data; 1835 struct async_scan_data *data = _data;
1848 struct Scsi_Host *shost = data->shost; 1836 struct Scsi_Host *shost = data->shost;
1849 1837
1850 do_scsi_scan_host(shost); 1838 do_scsi_scan_host(shost);
1851 scsi_finish_async_scan(data); 1839 scsi_finish_async_scan(data);
1852 return 0;
1853} 1840}
1854 1841
1855/** 1842/**
@@ -1858,7 +1845,6 @@ static int do_scan_async(void *_data)
1858 **/ 1845 **/
1859void scsi_scan_host(struct Scsi_Host *shost) 1846void scsi_scan_host(struct Scsi_Host *shost)
1860{ 1847{
1861 struct task_struct *p;
1862 struct async_scan_data *data; 1848 struct async_scan_data *data;
1863 1849
1864 if (strncmp(scsi_scan_type, "none", 4) == 0) 1850 if (strncmp(scsi_scan_type, "none", 4) == 0)
@@ -1873,9 +1859,11 @@ void scsi_scan_host(struct Scsi_Host *shost)
1873 return; 1859 return;
1874 } 1860 }
1875 1861
1876 p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); 1862 /* register with the async subsystem so wait_for_device_probe()
1877 if (IS_ERR(p)) 1863 * will flush this work
1878 do_scan_async(data); 1864 */
1865 async_schedule(do_scan_async, data);
1866
1879 /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */ 1867 /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
1880} 1868}
1881EXPORT_SYMBOL(scsi_scan_host); 1869EXPORT_SYMBOL(scsi_scan_host);
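Replacing the scan kthread with async_schedule() means wait_for_device_probe() now flushes host scans along with everything else in the async queue, which is what makes the scsi_wait_scan module (removed below) redundant. A minimal sketch; the my_ names are hypothetical:

#include <linux/async.h>

struct my_scan_data;				/* hypothetical payload */
static void my_do_scan(struct my_scan_data *d);		/* hypothetical */
static void my_finish_scan(struct my_scan_data *d);	/* hypothetical */

static void my_scan_async(void *data, async_cookie_t cookie)
{
	struct my_scan_data *d = data;

	my_do_scan(d);
	my_finish_scan(d);
}

void my_start_scan(struct my_scan_data *d)
{
	/* Registered with the async core, so wait_for_device_probe()
	 * naturally waits for in-flight scans. */
	async_schedule(my_scan_async, d);
}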
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 04c2a278076e..093d4f6a54d2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -35,6 +35,7 @@ static const struct {
35 { SDEV_DEL, "deleted" }, 35 { SDEV_DEL, "deleted" },
36 { SDEV_QUIESCE, "quiesce" }, 36 { SDEV_QUIESCE, "quiesce" },
37 { SDEV_OFFLINE, "offline" }, 37 { SDEV_OFFLINE, "offline" },
38 { SDEV_TRANSPORT_OFFLINE, "transport-offline" },
38 { SDEV_BLOCK, "blocked" }, 39 { SDEV_BLOCK, "blocked" },
39 { SDEV_CREATED_BLOCK, "created-blocked" }, 40 { SDEV_CREATED_BLOCK, "created-blocked" },
40}; 41};
@@ -966,16 +967,20 @@ void __scsi_remove_device(struct scsi_device *sdev)
966 device_del(dev); 967 device_del(dev);
967 } else 968 } else
968 put_device(&sdev->sdev_dev); 969 put_device(&sdev->sdev_dev);
970
971 /*
972 * Stop accepting new requests and wait until all queuecommand() and
973 * scsi_run_queue() invocations have finished before tearing down the
974 * device.
975 */
969 scsi_device_set_state(sdev, SDEV_DEL); 976 scsi_device_set_state(sdev, SDEV_DEL);
977 blk_cleanup_queue(sdev->request_queue);
978 cancel_work_sync(&sdev->requeue_work);
979
970 if (sdev->host->hostt->slave_destroy) 980 if (sdev->host->hostt->slave_destroy)
971 sdev->host->hostt->slave_destroy(sdev); 981 sdev->host->hostt->slave_destroy(sdev);
972 transport_destroy_device(dev); 982 transport_destroy_device(dev);
973 983
974 /* cause the request function to reject all I/O requests */
975 sdev->request_queue->queuedata = NULL;
976
977 /* Freeing the queue signals to block that we're done */
978 scsi_free_queue(sdev->request_queue);
979 put_device(dev); 984 put_device(dev);
980} 985}
981 986
@@ -1000,7 +1005,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
1000 struct scsi_device *sdev; 1005 struct scsi_device *sdev;
1001 1006
1002 spin_lock_irqsave(shost->host_lock, flags); 1007 spin_lock_irqsave(shost->host_lock, flags);
1003 starget->reap_ref++;
1004 restart: 1008 restart:
1005 list_for_each_entry(sdev, &shost->__devices, siblings) { 1009 list_for_each_entry(sdev, &shost->__devices, siblings) {
1006 if (sdev->channel != starget->channel || 1010 if (sdev->channel != starget->channel ||
@@ -1014,14 +1018,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
1014 goto restart; 1018 goto restart;
1015 } 1019 }
1016 spin_unlock_irqrestore(shost->host_lock, flags); 1020 spin_unlock_irqrestore(shost->host_lock, flags);
1017 scsi_target_reap(starget);
1018}
1019
1020static int __remove_child (struct device * dev, void * data)
1021{
1022 if (scsi_is_target_device(dev))
1023 __scsi_remove_target(to_scsi_target(dev));
1024 return 0;
1025} 1021}
1026 1022
1027/** 1023/**
@@ -1034,14 +1030,34 @@ static int __remove_child (struct device * dev, void * data)
1034 */ 1030 */
1035void scsi_remove_target(struct device *dev) 1031void scsi_remove_target(struct device *dev)
1036{ 1032{
1037 if (scsi_is_target_device(dev)) { 1033 struct Scsi_Host *shost = dev_to_shost(dev->parent);
1038 __scsi_remove_target(to_scsi_target(dev)); 1034 struct scsi_target *starget, *found;
1039 return; 1035 unsigned long flags;
1036
1037 restart:
1038 found = NULL;
1039 spin_lock_irqsave(shost->host_lock, flags);
1040 list_for_each_entry(starget, &shost->__targets, siblings) {
1041 if (starget->state == STARGET_DEL)
1042 continue;
1043 if (starget->dev.parent == dev || &starget->dev == dev) {
1044 found = starget;
1045 found->reap_ref++;
1046 break;
1047 }
1040 } 1048 }
1049 spin_unlock_irqrestore(shost->host_lock, flags);
1041 1050
1042 get_device(dev); 1051 if (found) {
1043 device_for_each_child(dev, NULL, __remove_child); 1052 __scsi_remove_target(found);
1044 put_device(dev); 1053 scsi_target_reap(found);
1054 /* in the case where @dev has multiple starget children,
1055 * continue removing.
1056 *
1057 * FIXME: does such a case exist?
1058 */
1059 goto restart;
1060 }
1045} 1061}
1046EXPORT_SYMBOL(scsi_remove_target); 1062EXPORT_SYMBOL(scsi_remove_target);
1047 1063
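scsi_remove_target() now searches under host_lock, pins the match via reap_ref, drops the lock for the heavy removal, and restarts the scan because the target list may have changed in the meantime. A condensed sketch of that search-pin-act-restart idiom; matches() stands in for the parent-or-self test, and the two removal helpers are mid-layer private:

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static bool matches(struct scsi_target *starget, struct device *dev);	/* hypothetical */

static void remove_matching_targets(struct Scsi_Host *shost, struct device *dev)
{
	struct scsi_target *starget, *found;
	unsigned long flags;

restart:
	found = NULL;
	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry(starget, &shost->__targets, siblings) {
		if (starget->state == STARGET_DEL)
			continue;
		if (matches(starget, dev)) {
			found = starget;
			found->reap_ref++;	/* pin before unlocking */
			break;
		}
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (found) {
		__scsi_remove_target(found);	/* mid-layer private */
		scsi_target_reap(found);	/* drops the pin */
		goto restart;			/* list may have changed */
	}
}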
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 579760420d53..2d1e68db9b3f 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1744,6 +1744,15 @@ fc_host_statistic(fcp_output_requests);
1744fc_host_statistic(fcp_control_requests); 1744fc_host_statistic(fcp_control_requests);
1745fc_host_statistic(fcp_input_megabytes); 1745fc_host_statistic(fcp_input_megabytes);
1746fc_host_statistic(fcp_output_megabytes); 1746fc_host_statistic(fcp_output_megabytes);
1747fc_host_statistic(fcp_packet_alloc_failures);
1748fc_host_statistic(fcp_packet_aborts);
1749fc_host_statistic(fcp_frame_alloc_failures);
1750fc_host_statistic(fc_no_free_exch);
1751fc_host_statistic(fc_no_free_exch_xid);
1752fc_host_statistic(fc_xid_not_found);
1753fc_host_statistic(fc_xid_busy);
1754fc_host_statistic(fc_seq_not_found);
1755fc_host_statistic(fc_non_bls_resp);
1747 1756
1748static ssize_t 1757static ssize_t
1749fc_reset_statistics(struct device *dev, struct device_attribute *attr, 1758fc_reset_statistics(struct device *dev, struct device_attribute *attr,
@@ -1784,6 +1793,15 @@ static struct attribute *fc_statistics_attrs[] = {
1784 &device_attr_host_fcp_control_requests.attr, 1793 &device_attr_host_fcp_control_requests.attr,
1785 &device_attr_host_fcp_input_megabytes.attr, 1794 &device_attr_host_fcp_input_megabytes.attr,
1786 &device_attr_host_fcp_output_megabytes.attr, 1795 &device_attr_host_fcp_output_megabytes.attr,
1796 &device_attr_host_fcp_packet_alloc_failures.attr,
1797 &device_attr_host_fcp_packet_aborts.attr,
1798 &device_attr_host_fcp_frame_alloc_failures.attr,
1799 &device_attr_host_fc_no_free_exch.attr,
1800 &device_attr_host_fc_no_free_exch_xid.attr,
1801 &device_attr_host_fc_xid_not_found.attr,
1802 &device_attr_host_fc_xid_busy.attr,
1803 &device_attr_host_fc_seq_not_found.attr,
1804 &device_attr_host_fc_non_bls_resp.attr,
1787 &device_attr_host_reset_statistics.attr, 1805 &device_attr_host_reset_statistics.attr,
1788 NULL 1806 NULL
1789}; 1807};
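Each fc_host_statistic() line above stamps out a read-only sysfs show routine plus a device attribute for one counter; the transport's real macro reads the counter through the LLD's get_fc_host_stats hook. The sketch below shows only the general macro-generated-attribute pattern, with a hypothetical stats struct and drvdata lookup:

#include <linux/device.h>
#include <linux/stat.h>
#include <linux/types.h>

struct my_host_stats {			/* hypothetical counter block */
	u64 fc_no_free_exch;
};

#define MY_HOST_STAT(name)						\
static ssize_t show_##name(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct my_host_stats *s = dev_get_drvdata(dev);	/* assumed */	\
	return snprintf(buf, PAGE_SIZE, "0x%llx\n",			\
			(unsigned long long)s->name);			\
}									\
static DEVICE_ATTR(host_##name, S_IRUGO, show_##name, NULL)

MY_HOST_STAT(fc_no_free_exch);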
@@ -2477,11 +2495,9 @@ static void fc_terminate_rport_io(struct fc_rport *rport)
2477 i->f->terminate_rport_io(rport); 2495 i->f->terminate_rport_io(rport);
2478 2496
2479 /* 2497 /*
2480 * must unblock to flush queued IO. The caller will have set 2498 * Must unblock to flush queued IO. scsi-ml will fail incoming reqs.
2481 * the port_state or flags, so that fc_remote_port_chkready will
2482 * fail IO.
2483 */ 2499 */
2484 scsi_target_unblock(&rport->dev); 2500 scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
2485} 2501}
2486 2502
2487/** 2503/**
@@ -2812,8 +2828,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2812 2828
2813 /* if target, initiate a scan */ 2829 /* if target, initiate a scan */
2814 if (rport->scsi_target_id != -1) { 2830 if (rport->scsi_target_id != -1) {
2815 scsi_target_unblock(&rport->dev); 2831 scsi_target_unblock(&rport->dev,
2816 2832 SDEV_RUNNING);
2817 spin_lock_irqsave(shost->host_lock, 2833 spin_lock_irqsave(shost->host_lock,
2818 flags); 2834 flags);
2819 rport->flags |= FC_RPORT_SCAN_PENDING; 2835 rport->flags |= FC_RPORT_SCAN_PENDING;
@@ -2882,7 +2898,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2882 spin_unlock_irqrestore(shost->host_lock, flags); 2898 spin_unlock_irqrestore(shost->host_lock, flags);
2883 2899
2884 if (ids->roles & FC_PORT_ROLE_FCP_TARGET) { 2900 if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
2885 scsi_target_unblock(&rport->dev); 2901 scsi_target_unblock(&rport->dev, SDEV_RUNNING);
2886 2902
2887 /* initiate a scan of the target */ 2903 /* initiate a scan of the target */
2888 spin_lock_irqsave(shost->host_lock, flags); 2904 spin_lock_irqsave(shost->host_lock, flags);
@@ -3087,7 +3103,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
3087 /* ensure any stgt delete functions are done */ 3103 /* ensure any stgt delete functions are done */
3088 fc_flush_work(shost); 3104 fc_flush_work(shost);
3089 3105
3090 scsi_target_unblock(&rport->dev); 3106 scsi_target_unblock(&rport->dev, SDEV_RUNNING);
3091 /* initiate a scan of the target */ 3107 /* initiate a scan of the target */
3092 spin_lock_irqsave(shost->host_lock, flags); 3108 spin_lock_irqsave(shost->host_lock, flags);
3093 rport->flags |= FC_RPORT_SCAN_PENDING; 3109 rport->flags |= FC_RPORT_SCAN_PENDING;
@@ -3131,7 +3147,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
3131 "blocked FC remote port time out: no longer" 3147 "blocked FC remote port time out: no longer"
3132 " a FCP target, removing starget\n"); 3148 " a FCP target, removing starget\n");
3133 spin_unlock_irqrestore(shost->host_lock, flags); 3149 spin_unlock_irqrestore(shost->host_lock, flags);
3134 scsi_target_unblock(&rport->dev); 3150 scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
3135 fc_queue_work(shost, &rport->stgt_delete_work); 3151 fc_queue_work(shost, &rport->stgt_delete_work);
3136 return; 3152 return;
3137 } 3153 }
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 1cf640e575da..09809d06eccb 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -907,7 +907,7 @@ static void session_recovery_timedout(struct work_struct *work)
907 session->transport->session_recovery_timedout(session); 907 session->transport->session_recovery_timedout(session);
908 908
909 ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); 909 ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
910 scsi_target_unblock(&session->dev); 910 scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
911 ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); 911 ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
912} 912}
913 913
@@ -930,7 +930,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
930 session->state = ISCSI_SESSION_LOGGED_IN; 930 session->state = ISCSI_SESSION_LOGGED_IN;
931 spin_unlock_irqrestore(&session->lock, flags); 931 spin_unlock_irqrestore(&session->lock, flags);
932 /* start IO */ 932 /* start IO */
933 scsi_target_unblock(&session->dev); 933 scsi_target_unblock(&session->dev, SDEV_RUNNING);
934 /* 934 /*
935 * Only do kernel scanning if the driver is properly hooked into 935 * Only do kernel scanning if the driver is properly hooked into
936 * the async scanning code (drivers like iscsi_tcp do login and 936 * the async scanning code (drivers like iscsi_tcp do login and
@@ -1180,7 +1180,7 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
1180 session->state = ISCSI_SESSION_FREE; 1180 session->state = ISCSI_SESSION_FREE;
1181 spin_unlock_irqrestore(&session->lock, flags); 1181 spin_unlock_irqrestore(&session->lock, flags);
1182 1182
1183 scsi_target_unblock(&session->dev); 1183 scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
1184 /* flush running scans then delete devices */ 1184 /* flush running scans then delete devices */
1185 scsi_flush_work(shost); 1185 scsi_flush_work(shost);
1186 __iscsi_unbind_session(&session->unbind_work); 1186 __iscsi_unbind_session(&session->unbind_work);
@@ -2936,7 +2936,10 @@ EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
2936static __init int iscsi_transport_init(void) 2936static __init int iscsi_transport_init(void)
2937{ 2937{
2938 int err; 2938 int err;
2939 2939 struct netlink_kernel_cfg cfg = {
2940 .groups = 1,
2941 .input = iscsi_if_rx,
2942 };
2940 printk(KERN_INFO "Loading iSCSI transport class v%s.\n", 2943 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
2941 ISCSI_TRANSPORT_VERSION); 2944 ISCSI_TRANSPORT_VERSION);
2942 2945
@@ -2966,8 +2969,8 @@ static __init int iscsi_transport_init(void)
2966 if (err) 2969 if (err)
2967 goto unregister_conn_class; 2970 goto unregister_conn_class;
2968 2971
2969 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, 2972 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI,
2970 NULL, THIS_MODULE); 2973 THIS_MODULE, &cfg);
2971 if (!nls) { 2974 if (!nls) {
2972 err = -ENOBUFS; 2975 err = -ENOBUFS;
2973 goto unregister_session_class; 2976 goto unregister_session_class;
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
deleted file mode 100644
index ae7814874618..000000000000
--- a/drivers/scsi/scsi_wait_scan.c
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * scsi_wait_scan.c
3 *
4 * Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com>
5 *
6 * This is a simple module to wait until all the async scans are
7 * complete. The idea is to use it in initrd/initramfs scripts. You
8 * modprobe it after all the modprobes of the root SCSI drivers and it
9 * will wait until they have all finished scanning their busses before
10 * allowing the boot to proceed
11 */
12
13#include <linux/module.h>
14#include <linux/device.h>
15#include "scsi_priv.h"
16
17static int __init wait_scan_init(void)
18{
19 /*
20 * First we need to wait for device probing to finish;
21 * the drivers we just loaded might just still be probing
22 * and might not yet have reached the scsi async scanning
23 */
24 wait_for_device_probe();
25 /*
26 * and then we wait for the actual asynchronous scsi scan
27 * to finish.
28 */
29 scsi_complete_async_scans();
30 return 0;
31}
32
33static void __exit wait_scan_exit(void)
34{
35}
36
37MODULE_DESCRIPTION("SCSI wait for scans");
38MODULE_AUTHOR("James Bottomley");
39MODULE_LICENSE("GPL");
40
41late_initcall(wait_scan_init);
42module_exit(wait_scan_exit);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6f72b80121a0..4df73e52a4f9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2261,8 +2261,13 @@ bad_sense:
2261 sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n"); 2261 sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
2262 2262
2263defaults: 2263defaults:
2264 sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n"); 2264 if (sdp->wce_default_on) {
2265 sdkp->WCE = 0; 2265 sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n");
2266 sdkp->WCE = 1;
2267 } else {
2268 sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
2269 sdkp->WCE = 0;
2270 }
2266 sdkp->RCD = 0; 2271 sdkp->RCD = 0;
2267 sdkp->DPOFUA = 0; 2272 sdkp->DPOFUA = 0;
2268} 2273}
@@ -2704,6 +2709,7 @@ static int sd_probe(struct device *dev)
2704 sdkp->disk = gd; 2709 sdkp->disk = gd;
2705 sdkp->index = index; 2710 sdkp->index = index;
2706 atomic_set(&sdkp->openers, 0); 2711 atomic_set(&sdkp->openers, 0);
2712 atomic_set(&sdkp->device->ioerr_cnt, 0);
2707 2713
2708 if (!sdp->request_queue->rq_timeout) { 2714 if (!sdp->request_queue->rq_timeout) {
2709 if (sdp->type != TYPE_MOD) 2715 if (sdp->type != TYPE_MOD)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 6a4fd00117ca..58f4ba6fe412 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -232,11 +232,11 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
232 * the host controller 232 * the host controller
233 * @reg_hcs - host controller status register value 233 * @reg_hcs - host controller status register value
234 * 234 *
235 * Returns 0 if device present, non-zero if no device detected 235 * Returns 1 if device present, 0 if no device detected
236 */ 236 */
237static inline int ufshcd_is_device_present(u32 reg_hcs) 237static inline int ufshcd_is_device_present(u32 reg_hcs)
238{ 238{
239 return (DEVICE_PRESENT & reg_hcs) ? 0 : -1; 239 return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
240} 240}
241 241
242/** 242/**
@@ -911,7 +911,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
911 911
912 /* check if device present */ 912 /* check if device present */
913 reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS)); 913 reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
914 if (ufshcd_is_device_present(reg)) { 914 if (!ufshcd_is_device_present(reg)) {
915 dev_err(&hba->pdev->dev, "cc: Device not present\n"); 915 dev_err(&hba->pdev->dev, "cc: Device not present\n");
916 err = -ENXIO; 916 err = -ENXIO;
917 goto out; 917 goto out;
@@ -1163,6 +1163,8 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
1163 if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL && 1163 if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
1164 task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) 1164 task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
1165 task_result = FAILED; 1165 task_result = FAILED;
1166 else
1167 task_result = SUCCESS;
1166 } else { 1168 } else {
1167 task_result = FAILED; 1169 task_result = FAILED;
1168 dev_err(&hba->pdev->dev, 1170 dev_err(&hba->pdev->dev,
@@ -1556,7 +1558,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
1556 goto out; 1558 goto out;
1557 } 1559 }
1558 clear_bit(free_slot, &hba->tm_condition); 1560 clear_bit(free_slot, &hba->tm_condition);
1559 return ufshcd_task_req_compl(hba, free_slot); 1561 err = ufshcd_task_req_compl(hba, free_slot);
1560out: 1562out:
1561 return err; 1563 return err;
1562} 1564}
@@ -1580,7 +1582,7 @@ static int ufshcd_device_reset(struct scsi_cmnd *cmd)
1580 tag = cmd->request->tag; 1582 tag = cmd->request->tag;
1581 1583
1582 err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET); 1584 err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
1583 if (err) 1585 if (err == FAILED)
1584 goto out; 1586 goto out;
1585 1587
1586 for (pos = 0; pos < hba->nutrs; pos++) { 1588 for (pos = 0; pos < hba->nutrs; pos++) {
@@ -1620,7 +1622,7 @@ static int ufshcd_host_reset(struct scsi_cmnd *cmd)
1620 if (hba->ufshcd_state == UFSHCD_STATE_RESET) 1622 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
1621 return SUCCESS; 1623 return SUCCESS;
1622 1624
1623 return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED; 1625 return ufshcd_do_reset(hba);
1624} 1626}
1625 1627
1626/** 1628/**
@@ -1652,7 +1654,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
1652 spin_unlock_irqrestore(host->host_lock, flags); 1654 spin_unlock_irqrestore(host->host_lock, flags);
1653 1655
1654 err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK); 1656 err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
1655 if (err) 1657 if (err == FAILED)
1656 goto out; 1658 goto out;
1657 1659
1658 scsi_dma_unmap(cmd); 1660 scsi_dma_unmap(cmd);
@@ -1953,24 +1955,7 @@ static struct pci_driver ufshcd_pci_driver = {
1953#endif 1955#endif
1954}; 1956};
1955 1957
1956/** 1958module_pci_driver(ufshcd_pci_driver);
1957 * ufshcd_init - Driver registration routine
1958 */
1959static int __init ufshcd_init(void)
1960{
1961 return pci_register_driver(&ufshcd_pci_driver);
1962}
1963module_init(ufshcd_init);
1964
1965/**
1966 * ufshcd_exit - Driver exit clean-up routine
1967 */
1968static void __exit ufshcd_exit(void)
1969{
1970 pci_unregister_driver(&ufshcd_pci_driver);
1971}
1972module_exit(ufshcd_exit);
1973
1974 1959
1975MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, " 1960MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
1976 "Vinayak Holikatti <h.vinayak@samsung.com>"); 1961 "Vinayak Holikatti <h.vinayak@samsung.com>");
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 1b3843117268..c7030fbee79c 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -25,6 +25,7 @@
25#include <scsi/scsi_cmnd.h> 25#include <scsi/scsi_cmnd.h>
26 26
27#define VIRTIO_SCSI_MEMPOOL_SZ 64 27#define VIRTIO_SCSI_MEMPOOL_SZ 64
28#define VIRTIO_SCSI_EVENT_LEN 8
28 29
29/* Command queue element */ 30/* Command queue element */
30struct virtio_scsi_cmd { 31struct virtio_scsi_cmd {
@@ -43,20 +44,42 @@ struct virtio_scsi_cmd {
43 } resp; 44 } resp;
44} ____cacheline_aligned_in_smp; 45} ____cacheline_aligned_in_smp;
45 46
46/* Driver instance state */ 47struct virtio_scsi_event_node {
47struct virtio_scsi { 48 struct virtio_scsi *vscsi;
48 /* Protects ctrl_vq, req_vq and sg[] */ 49 struct virtio_scsi_event event;
50 struct work_struct work;
51};
52
53struct virtio_scsi_vq {
54 /* Protects vq */
49 spinlock_t vq_lock; 55 spinlock_t vq_lock;
50 56
51 struct virtio_device *vdev; 57 struct virtqueue *vq;
52 struct virtqueue *ctrl_vq; 58};
53 struct virtqueue *event_vq; 59
54 struct virtqueue *req_vq; 60/* Per-target queue state */
61struct virtio_scsi_target_state {
62 /* Protects sg. Lock hierarchy is tgt_lock -> vq_lock. */
63 spinlock_t tgt_lock;
55 64
56 /* For sglist construction when adding commands to the virtqueue. */ 65 /* For sglist construction when adding commands to the virtqueue. */
57 struct scatterlist sg[]; 66 struct scatterlist sg[];
58}; 67};
59 68
69/* Driver instance state */
70struct virtio_scsi {
71 struct virtio_device *vdev;
72
73 struct virtio_scsi_vq ctrl_vq;
74 struct virtio_scsi_vq event_vq;
75 struct virtio_scsi_vq req_vq;
76
77 /* Get some buffers ready for event vq */
78 struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
79
80 struct virtio_scsi_target_state *tgt[];
81};
82
60static struct kmem_cache *virtscsi_cmd_cache; 83static struct kmem_cache *virtscsi_cmd_cache;
61static mempool_t *virtscsi_cmd_pool; 84static mempool_t *virtscsi_cmd_pool;
62 85
@@ -147,26 +170,25 @@ static void virtscsi_complete_cmd(void *buf)
147 170
148static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf)) 171static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
149{ 172{
150 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
151 struct virtio_scsi *vscsi = shost_priv(sh);
152 void *buf; 173 void *buf;
153 unsigned long flags;
154 unsigned int len; 174 unsigned int len;
155 175
156 spin_lock_irqsave(&vscsi->vq_lock, flags);
157
158 do { 176 do {
159 virtqueue_disable_cb(vq); 177 virtqueue_disable_cb(vq);
160 while ((buf = virtqueue_get_buf(vq, &len)) != NULL) 178 while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
161 fn(buf); 179 fn(buf);
162 } while (!virtqueue_enable_cb(vq)); 180 } while (!virtqueue_enable_cb(vq));
163
164 spin_unlock_irqrestore(&vscsi->vq_lock, flags);
165} 181}
166 182
167static void virtscsi_req_done(struct virtqueue *vq) 183static void virtscsi_req_done(struct virtqueue *vq)
168{ 184{
185 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
186 struct virtio_scsi *vscsi = shost_priv(sh);
187 unsigned long flags;
188
189 spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags);
169 virtscsi_vq_done(vq, virtscsi_complete_cmd); 190 virtscsi_vq_done(vq, virtscsi_complete_cmd);
191 spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags);
170}; 192};
171 193
172static void virtscsi_complete_free(void *buf) 194static void virtscsi_complete_free(void *buf)
@@ -181,12 +203,123 @@ static void virtscsi_complete_free(void *buf)
181 203
182static void virtscsi_ctrl_done(struct virtqueue *vq) 204static void virtscsi_ctrl_done(struct virtqueue *vq)
183{ 205{
206 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
207 struct virtio_scsi *vscsi = shost_priv(sh);
208 unsigned long flags;
209
210 spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags);
184 virtscsi_vq_done(vq, virtscsi_complete_free); 211 virtscsi_vq_done(vq, virtscsi_complete_free);
212 spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags);
185}; 213};
186 214
215static int virtscsi_kick_event(struct virtio_scsi *vscsi,
216 struct virtio_scsi_event_node *event_node)
217{
218 int ret;
219 struct scatterlist sg;
220 unsigned long flags;
221
222 sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
223
224 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
225
226 ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
227 if (ret >= 0)
228 virtqueue_kick(vscsi->event_vq.vq);
229
230 spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
231
232 return ret;
233}
234
235static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
236{
237 int i;
238
239 for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
240 vscsi->event_list[i].vscsi = vscsi;
241 virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
242 }
243
244 return 0;
245}
246
247static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
248{
249 int i;
250
251 for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
252 cancel_work_sync(&vscsi->event_list[i].work);
253}
254
255static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
256 struct virtio_scsi_event *event)
257{
258 struct scsi_device *sdev;
259 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
260 unsigned int target = event->lun[1];
261 unsigned int lun = (event->lun[2] << 8) | event->lun[3];
262
263 switch (event->reason) {
264 case VIRTIO_SCSI_EVT_RESET_RESCAN:
265 scsi_add_device(shost, 0, target, lun);
266 break;
267 case VIRTIO_SCSI_EVT_RESET_REMOVED:
268 sdev = scsi_device_lookup(shost, 0, target, lun);
269 if (sdev) {
270 scsi_remove_device(sdev);
271 scsi_device_put(sdev);
272 } else {
273 pr_err("SCSI device %d 0 %d %d not found\n",
274 shost->host_no, target, lun);
275 }
276 break;
277 default:
278 pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
279 }
280}
281
282static void virtscsi_handle_event(struct work_struct *work)
283{
284 struct virtio_scsi_event_node *event_node =
285 container_of(work, struct virtio_scsi_event_node, work);
286 struct virtio_scsi *vscsi = event_node->vscsi;
287 struct virtio_scsi_event *event = &event_node->event;
288
289 if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
290 event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
291 scsi_scan_host(virtio_scsi_host(vscsi->vdev));
292 }
293
294 switch (event->event) {
295 case VIRTIO_SCSI_T_NO_EVENT:
296 break;
297 case VIRTIO_SCSI_T_TRANSPORT_RESET:
298 virtscsi_handle_transport_reset(vscsi, event);
299 break;
300 default:
301 pr_err("Unsupport virtio scsi event %x\n", event->event);
302 }
303 virtscsi_kick_event(vscsi, event_node);
304}
305
306static void virtscsi_complete_event(void *buf)
307{
308 struct virtio_scsi_event_node *event_node = buf;
309
310 INIT_WORK(&event_node->work, virtscsi_handle_event);
311 schedule_work(&event_node->work);
312}
313
187static void virtscsi_event_done(struct virtqueue *vq) 314static void virtscsi_event_done(struct virtqueue *vq)
188{ 315{
189 virtscsi_vq_done(vq, virtscsi_complete_free); 316 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
317 struct virtio_scsi *vscsi = shost_priv(sh);
318 unsigned long flags;
319
320 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
321 virtscsi_vq_done(vq, virtscsi_complete_event);
322 spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
190}; 323};
191 324
192static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx, 325static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
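Event buffers complete in the virtqueue interrupt path, so virtscsi_complete_event() defers the real handling to process context through a work item embedded in the event node, and the handler re-posts the buffer when done. A generic sketch of that bounce-to-workqueue idiom with a hypothetical node type:

#include <linux/workqueue.h>

struct evt_node {			/* hypothetical carrier */
	struct work_struct work;
	/* event payload would live here */
};

static void handle_evt(struct work_struct *work)
{
	struct evt_node *node = container_of(work, struct evt_node, work);

	/* sleepable processing here, then re-post node to the event vq */
}

static void evt_done(void *buf)		/* virtqueue completion, atomic */
{
	struct evt_node *node = buf;

	INIT_WORK(&node->work, handle_evt);
	schedule_work(&node->work);
}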
@@ -212,25 +345,17 @@ static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
212 * @req_size : size of the request buffer 345 * @req_size : size of the request buffer
213 * @resp_size : size of the response buffer 346 * @resp_size : size of the response buffer
214 * 347 *
215 * Called with vq_lock held. 348 * Called with tgt_lock held.
216 */ 349 */
217static void virtscsi_map_cmd(struct virtio_scsi *vscsi, 350static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt,
218 struct virtio_scsi_cmd *cmd, 351 struct virtio_scsi_cmd *cmd,
219 unsigned *out_num, unsigned *in_num, 352 unsigned *out_num, unsigned *in_num,
220 size_t req_size, size_t resp_size) 353 size_t req_size, size_t resp_size)
221{ 354{
222 struct scsi_cmnd *sc = cmd->sc; 355 struct scsi_cmnd *sc = cmd->sc;
223 struct scatterlist *sg = vscsi->sg; 356 struct scatterlist *sg = tgt->sg;
224 unsigned int idx = 0; 357 unsigned int idx = 0;
225 358
226 if (sc) {
227 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
228 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
229
230 /* TODO: check feature bit and fail if unsupported? */
231 BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
232 }
233
234 /* Request header. */ 359 /* Request header. */
235 sg_set_buf(&sg[idx++], &cmd->req, req_size); 360 sg_set_buf(&sg[idx++], &cmd->req, req_size);
236 361
@@ -250,7 +375,8 @@ static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
250 *in_num = idx - *out_num; 375 *in_num = idx - *out_num;
251} 376}
252 377
253static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq, 378static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
379 struct virtio_scsi_vq *vq,
254 struct virtio_scsi_cmd *cmd, 380 struct virtio_scsi_cmd *cmd,
255 size_t req_size, size_t resp_size, gfp_t gfp) 381 size_t req_size, size_t resp_size, gfp_t gfp)
256{ 382{
@@ -258,24 +384,35 @@ static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
258 unsigned long flags; 384 unsigned long flags;
259 int ret; 385 int ret;
260 386
261 spin_lock_irqsave(&vscsi->vq_lock, flags); 387 spin_lock_irqsave(&tgt->tgt_lock, flags);
262 388 virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
263 virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size);
264 389
265 ret = virtqueue_add_buf(vq, vscsi->sg, out_num, in_num, cmd, gfp); 390 spin_lock(&vq->vq_lock);
391 ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
392 spin_unlock(&tgt->tgt_lock);
266 if (ret >= 0) 393 if (ret >= 0)
267 virtqueue_kick(vq); 394 ret = virtqueue_kick_prepare(vq->vq);
395
396 spin_unlock_irqrestore(&vq->vq_lock, flags);
268 397
269 spin_unlock_irqrestore(&vscsi->vq_lock, flags); 398 if (ret > 0)
399 virtqueue_notify(vq->vq);
270 return ret; 400 return ret;
271} 401}
272 402
273static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) 403static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
274{ 404{
275 struct virtio_scsi *vscsi = shost_priv(sh); 405 struct virtio_scsi *vscsi = shost_priv(sh);
406 struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id];
276 struct virtio_scsi_cmd *cmd; 407 struct virtio_scsi_cmd *cmd;
277 int ret; 408 int ret;
278 409
410 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
411 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
412
413 /* TODO: check feature bit and fail if unsupported? */
414 BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
415
279 dev_dbg(&sc->device->sdev_gendev, 416 dev_dbg(&sc->device->sdev_gendev,
280 "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); 417 "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
281 418
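virtscsi_kick_cmd() now takes both locks in the documented tgt_lock then vq_lock order and splits the kick: virtqueue_kick_prepare() decides under the lock whether the host needs a notification, and the possibly slow virtqueue_notify() runs after the unlock. A condensed sketch of the split-kick idiom using the 3.5-era virtqueue_add_buf() API:

#include <linux/spinlock.h>
#include <linux/virtio.h>

static int kick_one(struct virtqueue *vq, spinlock_t *lock,
		    struct scatterlist *sgs, unsigned int out,
		    unsigned int in, void *data)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(lock, flags);
	ret = virtqueue_add_buf(vq, sgs, out, in, data, GFP_ATOMIC);
	if (ret >= 0)			/* buffer queued */
		ret = virtqueue_kick_prepare(vq);
	spin_unlock_irqrestore(lock, flags);

	if (ret > 0)			/* host actually needs a notify */
		virtqueue_notify(vq);	/* slow path, run unlocked */
	return ret;
}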
@@ -300,7 +437,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
300 BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); 437 BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
301 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); 438 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
302 439
303 if (virtscsi_kick_cmd(vscsi, vscsi->req_vq, cmd, 440 if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
304 sizeof cmd->req.cmd, sizeof cmd->resp.cmd, 441 sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
305 GFP_ATOMIC) >= 0) 442 GFP_ATOMIC) >= 0)
306 ret = 0; 443 ret = 0;
@@ -312,10 +449,11 @@ out:
312static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) 449static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
313{ 450{
314 DECLARE_COMPLETION_ONSTACK(comp); 451 DECLARE_COMPLETION_ONSTACK(comp);
452 struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id];
315 int ret = FAILED; 453 int ret = FAILED;
316 454
317 cmd->comp = &comp; 455 cmd->comp = &comp;
318 if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd, 456 if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd,
319 sizeof cmd->req.tmf, sizeof cmd->resp.tmf, 457 sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
320 GFP_NOIO) < 0) 458 GFP_NOIO) < 0)
321 goto out; 459 goto out;
@@ -408,11 +546,63 @@ static struct scsi_host_template virtscsi_host_template = {
                           &__val, sizeof(__val)); \
         })
 
+static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
+                             struct virtqueue *vq)
+{
+        spin_lock_init(&virtscsi_vq->vq_lock);
+        virtscsi_vq->vq = vq;
+}
+
+static struct virtio_scsi_target_state *virtscsi_alloc_tgt(
+        struct virtio_device *vdev, int sg_elems)
+{
+        struct virtio_scsi_target_state *tgt;
+        gfp_t gfp_mask = GFP_KERNEL;
+
+        /* We need extra sg elements at head and tail. */
+        tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2),
+                      gfp_mask);
+
+        if (!tgt)
+                return NULL;
+
+        spin_lock_init(&tgt->tgt_lock);
+        sg_init_table(tgt->sg, sg_elems + 2);
+        return tgt;
+}
+
+static void virtscsi_scan(struct virtio_device *vdev)
+{
+        struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;
+
+        scsi_scan_host(shost);
+}
+
+static void virtscsi_remove_vqs(struct virtio_device *vdev)
+{
+        struct Scsi_Host *sh = virtio_scsi_host(vdev);
+        struct virtio_scsi *vscsi = shost_priv(sh);
+        u32 i, num_targets;
+
+        /* Stop all the virtqueues. */
+        vdev->config->reset(vdev);
+
+        num_targets = sh->max_id;
+        for (i = 0; i < num_targets; i++) {
+                kfree(vscsi->tgt[i]);
+                vscsi->tgt[i] = NULL;
+        }
+
+        vdev->config->del_vqs(vdev);
+}
+
 static int virtscsi_init(struct virtio_device *vdev,
-                         struct virtio_scsi *vscsi)
+                         struct virtio_scsi *vscsi, int num_targets)
 {
         int err;
         struct virtqueue *vqs[3];
+        u32 i, sg_elems;
+
         vq_callback_t *callbacks[] = {
                 virtscsi_ctrl_done,
                 virtscsi_event_done,
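
virtscsi_alloc_tgt() above folds the per-target scatterlist into the same kmalloc() as the state structure, reserving two extra entries because the request header is mapped before, and the response header after, the data pages. A minimal sketch of this single-allocation idiom; the structure is an assumed reduction to the fields this hunk exercises (the real layout lives in the driver's private definitions):

/* Assumed layout: per-target state ending in a flexible sg array. */
struct tgt_sketch {
        spinlock_t tgt_lock;            /* protects sg during mapping */
        struct scatterlist sg[];        /* [header][data...][footer] */
};

static struct tgt_sketch *alloc_tgt_sketch(int sg_elems)
{
        struct tgt_sketch *tgt;

        /* One allocation covers the struct plus sg_elems + 2 entries:
         * the data segments plus one slot at head and one at tail. */
        tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2),
                      GFP_KERNEL);
        if (!tgt)
                return NULL;

        spin_lock_init(&tgt->tgt_lock);
        sg_init_table(tgt->sg, sg_elems + 2);
        return tgt;
}
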
@@ -429,13 +619,32 @@ static int virtscsi_init(struct virtio_device *vdev,
         if (err)
                 return err;
 
-        vscsi->ctrl_vq = vqs[0];
-        vscsi->event_vq = vqs[1];
-        vscsi->req_vq = vqs[2];
+        virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
+        virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
+        virtscsi_init_vq(&vscsi->req_vq, vqs[2]);
 
         virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
         virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
-        return 0;
+
+        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+                virtscsi_kick_event_all(vscsi);
+
+        /* We need to know how many segments before we allocate. */
+        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
+
+        for (i = 0; i < num_targets; i++) {
+                vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems);
+                if (!vscsi->tgt[i]) {
+                        err = -ENOMEM;
+                        goto out;
+                }
+        }
+        err = 0;
+
+out:
+        if (err)
+                virtscsi_remove_vqs(vdev);
+        return err;
 }
 
 static int __devinit virtscsi_probe(struct virtio_device *vdev)
@@ -443,31 +652,25 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
         struct Scsi_Host *shost;
         struct virtio_scsi *vscsi;
         int err;
-        u32 sg_elems;
+        u32 sg_elems, num_targets;
         u32 cmd_per_lun;
 
-        /* We need to know how many segments before we allocate.
-         * We need an extra sg elements at head and tail.
-         */
-        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
-
         /* Allocate memory and link the structs together.  */
+        num_targets = virtscsi_config_get(vdev, max_target) + 1;
         shost = scsi_host_alloc(&virtscsi_host_template,
-                sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2));
+                sizeof(*vscsi)
+                + num_targets * sizeof(struct virtio_scsi_target_state));
 
         if (!shost)
                 return -ENOMEM;
 
+        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
         shost->sg_tablesize = sg_elems;
         vscsi = shost_priv(shost);
         vscsi->vdev = vdev;
         vdev->priv = shost;
 
-        /* Random initializations.  */
-        spin_lock_init(&vscsi->vq_lock);
-        sg_init_table(vscsi->sg, sg_elems + 2);
-
-        err = virtscsi_init(vdev, vscsi);
+        err = virtscsi_init(vdev, vscsi, num_targets);
         if (err)
                 goto virtscsi_init_failed;
 
@@ -475,15 +678,16 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
         shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
         shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
         shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
-        shost->max_id = virtscsi_config_get(vdev, max_target) + 1;
+        shost->max_id = num_targets;
         shost->max_channel = 0;
         shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
         err = scsi_add_host(shost, &vdev->dev);
         if (err)
                 goto scsi_add_host_failed;
-
-        scsi_scan_host(shost);
-
+        /*
+         * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
+         * after VIRTIO_CONFIG_S_DRIVER_OK has been set.
+         */
         return 0;
 
 scsi_add_host_failed:
@@ -493,17 +697,13 @@ virtscsi_init_failed:
         return err;
 }
 
-static void virtscsi_remove_vqs(struct virtio_device *vdev)
-{
-        /* Stop all the virtqueues. */
-        vdev->config->reset(vdev);
-
-        vdev->config->del_vqs(vdev);
-}
-
 static void __devexit virtscsi_remove(struct virtio_device *vdev)
 {
         struct Scsi_Host *shost = virtio_scsi_host(vdev);
+        struct virtio_scsi *vscsi = shost_priv(shost);
+
+        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+                virtscsi_cancel_event_work(vscsi);
 
         scsi_remove_host(shost);
 
@@ -523,7 +723,7 @@ static int virtscsi_restore(struct virtio_device *vdev)
         struct Scsi_Host *sh = virtio_scsi_host(vdev);
         struct virtio_scsi *vscsi = shost_priv(sh);
 
-        return virtscsi_init(vdev, vscsi);
+        return virtscsi_init(vdev, vscsi, sh->max_id);
 }
 #endif
 
@@ -532,11 +732,18 @@ static struct virtio_device_id id_table[] = {
         { 0 },
 };
 
+static unsigned int features[] = {
+        VIRTIO_SCSI_F_HOTPLUG
+};
+
 static struct virtio_driver virtio_scsi_driver = {
+        .feature_table = features,
+        .feature_table_size = ARRAY_SIZE(features),
         .driver.name = KBUILD_MODNAME,
         .driver.owner = THIS_MODULE,
         .id_table = id_table,
         .probe = virtscsi_probe,
+        .scan = virtscsi_scan,
 #ifdef CONFIG_PM
         .freeze = virtscsi_freeze,
         .restore = virtscsi_restore,
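
The final hunk wires the two new pieces into the driver registration: the feature table lets virtio core negotiate VIRTIO_SCSI_F_HOTPLUG, and the ->scan hook moves scsi_scan_host() out of probe, because virtio core invokes ->scan only after it has set VIRTIO_CONFIG_S_DRIVER_OK; scanning from probe would let SCSI commands reach a device the host does not yet consider live. A sketch of the resulting registration, restating the fields added above with that ordering spelled out in comments:

/* Schematic shape of the registration after this patch. */
static struct virtio_driver virtio_scsi_sketch = {
        .feature_table      = features,         /* bits the driver may accept */
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name        = KBUILD_MODNAME,
        .driver.owner       = THIS_MODULE,
        .id_table           = id_table,
        .probe              = virtscsi_probe,   /* allocate + scsi_add_host(),
                                                 * but no scanning here */
        .scan               = virtscsi_scan,    /* called by virtio core only
                                                 * after VIRTIO_CONFIG_S_DRIVER_OK,
                                                 * so scsi_scan_host() cannot race
                                                 * with an un-ready device */
};
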