author	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 13:49:33 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 13:49:33 -0500
commit	a829a8445f09036404060f4d6489cb13433f4304 (patch)
tree	60067e1425239a9f372c10100ede39691c3d612b /drivers/scsi
parent	84b6079134420f4635f23c2088a3892057b23bb0 (diff)
parent	f5b893c947151d424a4ab55ea3a8544b81974b31 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This update includes the usual round of major driver updates (ncr5380,
  lpfc, hisi_sas, megaraid_sas, ufs, ibmvscsis, mpt3sas). There's also an
  assortment of minor fixes, mostly in error legs or other not very user
  visible stuff. The major change is the pci_alloc_irq_vectors replacement
  for the old pci_msix_.. calls; this effectively makes IRQ mapping generic
  for the drivers and allows blk_mq to use the information"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (256 commits)
  scsi: qla4xxx: switch to pci_alloc_irq_vectors
  scsi: hisi_sas: support deferred probe for v2 hw
  scsi: megaraid_sas: switch to pci_alloc_irq_vectors
  scsi: scsi_devinfo: remove synchronous ALUA for NETAPP devices
  scsi: be2iscsi: set errno on error path
  scsi: be2iscsi: set errno on error path
  scsi: hpsa: fallback to use legacy REPORT PHYS command
  scsi: scsi_dh_alua: Fix RCU annotations
  scsi: hpsa: use %phN for short hex dumps
  scsi: hisi_sas: fix free'ing in probe and remove
  scsi: isci: switch to pci_alloc_irq_vectors
  scsi: ipr: Fix runaway IRQs when falling back from MSI to LSI
  scsi: dpt_i2o: double free on error path
  scsi: cxlflash: Migrate scsi command pointer to AFU command
  scsi: cxlflash: Migrate IOARRIN specific routines to function pointers
  scsi: cxlflash: Cleanup queuecommand()
  scsi: cxlflash: Cleanup send_tmf()
  scsi: cxlflash: Remove AFU command lock
  scsi: cxlflash: Wait for active AFU commands to timeout upon tear down
  scsi: cxlflash: Remove private command pool
  ...
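Most of the per-driver conversions in this series follow the same shape: drop
the driver-owned struct msix_entry array, let the PCI core pick MSI-X, MSI or
legacy INTx, and use pci_irq_vector() to map a vector index to a Linux IRQ
number. A minimal sketch of that pattern, assuming a hypothetical demo_isr
handler and "demo" device name (placeholders, not taken from any commit in
this merge):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	/* Acknowledge and handle the device interrupt here. */
	return IRQ_HANDLED;
}

static int demo_setup_irqs(struct pci_dev *pdev)
{
	int nvec, i, err;

	/* Let the PCI core try MSI-X first, then MSI, then legacy INTx. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 4,
			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/*
		 * pci_irq_vector() maps vector index i to a Linux IRQ.
		 * A shared legacy INTx line would normally also need
		 * IRQF_SHARED here, as the arcmsr conversion does.
		 */
		err = request_irq(pci_irq_vector(pdev, i), demo_isr, 0,
				  "demo", pdev);
		if (err)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), pdev);
	pci_free_irq_vectors(pdev);
	return err;
}

Teardown is the mirror image, as in the reworked arcmsr_free_irq() below:
free_irq(pci_irq_vector(...)) for each allocated vector, then
pci_free_irq_vectors(). Passing PCI_IRQ_AFFINITY (as the aacraid conversion
does) additionally spreads the vectors across CPUs, replacing the hand-rolled
irq_set_affinity_hint() loops that this series deletes.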
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 35
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/NCR5380.c | 137
-rw-r--r--  drivers/scsi/NCR5380.h | 87
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 1
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 10
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 25
-rw-r--r--  drivers/scsi/aacraid/linit.c | 20
-rw-r--r--  drivers/scsi/advansys.c | 3
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_hwi.c | 5
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 5
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 82
-rw-r--r--  drivers/scsi/arm/cumana_1.c | 98
-rw-r--r--  drivers/scsi/arm/oak.c | 34
-rw-r--r--  drivers/scsi/atari_scsi.c | 77
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 8
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h | 30
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 62
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 4
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 1
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c | 3
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2
-rw-r--r--  drivers/scsi/cxlflash/common.h | 39
-rw-r--r--  drivers/scsi/cxlflash/lunmgt.c | 6
-rw-r--r--  drivers/scsi/cxlflash/main.c | 410
-rw-r--r--  drivers/scsi/cxlflash/sislite.h | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 16
-rw-r--r--  drivers/scsi/dmx3191d.c | 33
-rw-r--r--  drivers/scsi/dpt_i2o.c | 7
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 25
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 157
-rw-r--r--  drivers/scsi/fcoe/fcoe_sysfs.c | 83
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 30
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 4
-rw-r--r--  drivers/scsi/fnic/fnic_trace.h | 2
-rw-r--r--  drivers/scsi/fnic/vnic_dev.c | 10
-rw-r--r--  drivers/scsi/g_NCR5380.c | 296
-rw-r--r--  drivers/scsi/g_NCR5380.h | 32
-rw-r--r--  drivers/scsi/g_NCR5380_mmio.c | 10
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 11
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 67
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 79
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 556
-rw-r--r--  drivers/scsi/hpsa.c | 252
-rw-r--r--  drivers/scsi/hpsa.h | 6
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 40
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 900
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h | 5
-rw-r--r--  drivers/scsi/ipr.c | 174
-rw-r--r--  drivers/scsi/ipr.h | 7
-rw-r--r--  drivers/scsi/ips.c | 13
-rw-r--r--  drivers/scsi/isci/host.h | 1
-rw-r--r--  drivers/scsi/isci/init.c | 23
-rw-r--r--  drivers/scsi/isci/probe_roms.c | 1
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 7
-rw-r--r--  drivers/scsi/isci/request.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 61
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 256
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 235
-rw-r--r--  drivers/scsi/libfc/fc_libfc.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 126
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 561
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 160
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 422
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 116
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 56
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 41
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mac_scsi.c | 83
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 136
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 23
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 7
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 186
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 39
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 69
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.h | 1
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 129
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 8
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.c | 2
-rw-r--r--  drivers/scsi/pmcraid.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 449
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 52
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 15
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 18
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 27
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 97
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 15
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 51
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 455
-rw-r--r--  drivers/scsi/scsi_transport_srp.c | 52
-rw-r--r--  drivers/scsi/sd.c | 4
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 102
-rw-r--r--  drivers/scsi/storvsc_drv.c | 4
-rw-r--r--  drivers/scsi/sun3_scsi.c | 80
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 39
-rw-r--r--  drivers/scsi/ufs/ufs.h | 5
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h | 9
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c | 5
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 482
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 46
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 3
-rw-r--r--  drivers/scsi/ufs/unipro.h | 4
119 files changed, 5015 insertions(+), 3857 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3e2bdb90813c..dfa93347c752 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,6 +263,7 @@ config SCSI_SPI_ATTRS
 config SCSI_FC_ATTRS
 	tristate "FiberChannel Transport Attributes"
 	depends on SCSI && NET
+	select BLK_DEV_BSGLIB
 	select SCSI_NETLINK
 	help
 	  If you wish to export transport-specific information about
@@ -743,40 +744,18 @@ config SCSI_ISCI
 	  control unit found in the Intel(R) C600 series chipset.
 
 config SCSI_GENERIC_NCR5380
-	tristate "Generic NCR5380/53c400 SCSI PIO support"
-	depends on ISA && SCSI
+	tristate "Generic NCR5380/53c400 SCSI ISA card support"
+	depends on ISA && SCSI && HAS_IOPORT_MAP
 	select SCSI_SPI_ATTRS
 	---help---
-	  This is a driver for the old NCR 53c80 series of SCSI controllers
-	  on boards using PIO. Most boards such as the Trantor T130 fit this
-	  category, along with a large number of ISA 8bit controllers shipped
-	  for free with SCSI scanners. If you have a PAS16, T128 or DMX3191
-	  you should select the specific driver for that card rather than
-	  generic 5380 support.
-
-	  It is explained in section 3.8 of the SCSI-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>. If it doesn't work out
-	  of the box, you may have to change some settings in
-	  <file:drivers/scsi/g_NCR5380.h>.
+	  This is a driver for old ISA card SCSI controllers based on a
+	  NCR 5380, 53C80, 53C400, 53C400A, or DTC 436 device.
+	  Most boards such as the Trantor T130 fit this category, as do
+	  various 8-bit and 16-bit ISA cards bundled with SCSI scanners.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called g_NCR5380.
 
-config SCSI_GENERIC_NCR5380_MMIO
-	tristate "Generic NCR5380/53c400 SCSI MMIO support"
-	depends on ISA && SCSI
-	select SCSI_SPI_ATTRS
-	---help---
-	  This is a driver for the old NCR 53c80 series of SCSI controllers
-	  on boards using memory mapped I/O.
-	  It is explained in section 3.8 of the SCSI-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>. If it doesn't work out
-	  of the box, you may have to change some settings in
-	  <file:drivers/scsi/g_NCR5380.h>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called g_NCR5380_mmio.
-
 config SCSI_IPS
 	tristate "IBM ServeRAID support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1520596f54a6..a2d03957cbe2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -74,7 +74,6 @@ obj-$(CONFIG_SCSI_ISCI) += isci/
 obj-$(CONFIG_SCSI_IPS)		+= ips.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
 obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
-obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
 obj-$(CONFIG_SCSI_NCR53C406A)	+= NCR53c406a.o
 obj-$(CONFIG_SCSI_NCR_D700)	+= 53c700.o NCR_D700.o
 obj-$(CONFIG_SCSI_NCR_Q720)	+= NCR_Q720_mod.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 790babc5ef66..d849ffa378b1 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -121,9 +121,10 @@
  *
  * Either real DMA *or* pseudo DMA may be implemented
  *
- * NCR5380_dma_write_setup(instance, src, count) - initialize
- * NCR5380_dma_read_setup(instance, dst, count) - initialize
- * NCR5380_dma_residual(instance); - residual count
+ * NCR5380_dma_xfer_len - determine size of DMA/PDMA transfer
+ * NCR5380_dma_send_setup - execute DMA/PDMA from memory to 5380
+ * NCR5380_dma_recv_setup - execute DMA/PDMA from 5380 to memory
+ * NCR5380_dma_residual - residual byte count
  *
  * The generic driver is initialized by calling NCR5380_init(instance),
  * after setting the appropriate host specific fields and ID. If the
@@ -178,7 +179,7 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
 
 /**
  * NCR5380_poll_politely2 - wait for two chip register values
- * @instance: controller to poll
+ * @hostdata: host private data
  * @reg1: 5380 register to poll
  * @bit1: Bitmask to check
  * @val1: Expected value
@@ -195,18 +196,14 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
  * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT.
  */
 
-static int NCR5380_poll_politely2(struct Scsi_Host *instance,
-                                  int reg1, int bit1, int val1,
-                                  int reg2, int bit2, int val2, int wait)
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata,
+                                  unsigned int reg1, u8 bit1, u8 val1,
+                                  unsigned int reg2, u8 bit2, u8 val2,
+                                  unsigned long wait)
 {
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+	unsigned long n = hostdata->poll_loops;
 	unsigned long deadline = jiffies + wait;
-	unsigned long n;
 
-	/* Busy-wait for up to 10 ms */
-	n = min(10000U, jiffies_to_usecs(wait));
-	n *= hostdata->accesses_per_ms;
-	n /= 2000;
 	do {
 		if ((NCR5380_read(reg1) & bit1) == val1)
 			return 0;
@@ -288,6 +285,7 @@ mrs[] = {
 
 static void NCR5380_print(struct Scsi_Host *instance)
 {
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char status, data, basr, mr, icr, i;
 
 	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
@@ -337,6 +335,7 @@ static struct {
 
 static void NCR5380_print_phase(struct Scsi_Host *instance)
 {
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char status;
 	int i;
 
@@ -441,14 +440,14 @@ static void prepare_info(struct Scsi_Host *instance)
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 
 	snprintf(hostdata->info, sizeof(hostdata->info),
-	         "%s, io_port 0x%lx, n_io_port %d, "
-	         "base 0x%lx, irq %d, "
+	         "%s, irq %d, "
+	         "io_port 0x%lx, base 0x%lx, "
 	         "can_queue %d, cmd_per_lun %d, "
 	         "sg_tablesize %d, this_id %d, "
 	         "flags { %s%s%s}, "
 	         "options { %s} ",
-	         instance->hostt->name, instance->io_port, instance->n_io_port,
-	         instance->base, instance->irq,
+	         instance->hostt->name, instance->irq,
+	         hostdata->io_port, hostdata->base,
 	         instance->can_queue, instance->cmd_per_lun,
 	         instance->sg_tablesize, instance->this_id,
 	         hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "",
@@ -482,6 +481,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int i;
 	unsigned long deadline;
+	unsigned long accesses_per_ms;
 
 	instance->max_lun = 7;
 
@@ -530,7 +530,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
 		++i;
 		cpu_relax();
 	} while (time_is_after_jiffies(deadline));
-	hostdata->accesses_per_ms = i / 256;
+	accesses_per_ms = i / 256;
+	hostdata->poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2;
 
 	return 0;
 }
@@ -560,7 +561,7 @@ static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
 	case 3:
 	case 5:
 		shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n");
-		NCR5380_poll_politely(instance,
+		NCR5380_poll_politely(hostdata,
 		                      STATUS_REG, SR_BSY, 0, 5 * HZ);
 		break;
 	case 2:
@@ -871,7 +872,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 	NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 
-	transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
+	transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata);
 	hostdata->dma_len = 0;
 
 	data = (unsigned char **)&hostdata->connected->SCp.ptr;
@@ -994,7 +995,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
 		}
 		handled = 1;
 	} else {
-		shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
+		dsprintk(NDEBUG_INTR, instance, "interrupt without IRQ bit\n");
 #ifdef SUN3_SCSI_VME
 		dregs->csr |= CSR_DMA_ENABLE;
 #endif
@@ -1075,7 +1076,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	 */
 
 	spin_unlock_irq(&hostdata->lock);
-	err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0,
+	err = NCR5380_poll_politely2(hostdata, MODE_REG, MR_ARBITRATE, 0,
 	                             INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS,
 	                             ICR_ARBITRATION_PROGRESS, HZ);
 	spin_lock_irq(&hostdata->lock);
@@ -1201,7 +1202,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	 * selection.
 	 */
 
-	err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY,
+	err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, SR_BSY,
 	                            msecs_to_jiffies(250));
 
 	if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
@@ -1247,7 +1248,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 
 	/* Wait for start of REQ/ACK handshake */
 
-	err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+	err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
 	spin_lock_irq(&hostdata->lock);
 	if (err < 0) {
 		shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
@@ -1318,6 +1319,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
                                 unsigned char *phase, int *count,
                                 unsigned char **data)
 {
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char p = *phase, tmp;
 	int c = *count;
 	unsigned char *d = *data;
@@ -1336,7 +1338,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
 		 * valid
 		 */
 
-		if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
+		if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
 			break;
 
 		dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1381,7 +1383,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
 			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
 		}
 
-		if (NCR5380_poll_politely(instance,
+		if (NCR5380_poll_politely(hostdata,
 		                          STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
 			break;
 
@@ -1440,6 +1442,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
 
 static void do_reset(struct Scsi_Host *instance)
 {
+	struct NCR5380_hostdata __maybe_unused *hostdata = shost_priv(instance);
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1462,6 +1465,7 @@ static void do_reset(struct Scsi_Host *instance)
 
 static int do_abort(struct Scsi_Host *instance)
 {
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char *msgptr, phase, tmp;
 	int len;
 	int rc;
@@ -1479,7 +1483,7 @@ static int do_abort(struct Scsi_Host *instance)
 	 * the target sees, so we just handshake.
 	 */
 
-	rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
+	rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
 	if (rc < 0)
 		goto timeout;
 
@@ -1490,7 +1494,7 @@ static int do_abort(struct Scsi_Host *instance)
 	if (tmp != PHASE_MSGOUT) {
 		NCR5380_write(INITIATOR_COMMAND_REG,
 		              ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
-		rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ);
+		rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
 		if (rc < 0)
 			goto timeout;
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1575,9 +1579,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 	 * starting the NCR. This is also the cleaner way for the TT.
 	 */
 		if (p & SR_IO)
-			result = NCR5380_dma_recv_setup(instance, d, c);
+			result = NCR5380_dma_recv_setup(hostdata, d, c);
 		else
-			result = NCR5380_dma_send_setup(instance, d, c);
+			result = NCR5380_dma_send_setup(hostdata, d, c);
 	}
 
 	/*
@@ -1609,9 +1613,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 	 * NCR access, else the DMA setup gets trashed!
 	 */
 		if (p & SR_IO)
-			result = NCR5380_dma_recv_setup(instance, d, c);
+			result = NCR5380_dma_recv_setup(hostdata, d, c);
 		else
-			result = NCR5380_dma_send_setup(instance, d, c);
+			result = NCR5380_dma_send_setup(hostdata, d, c);
 	}
 
 	/* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */
@@ -1678,12 +1682,12 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 		 * byte.
 		 */
 
-		if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+		if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
 		                          BASR_DRQ, BASR_DRQ, HZ) < 0) {
 			result = -1;
 			shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
 		}
-		if (NCR5380_poll_politely(instance, STATUS_REG,
+		if (NCR5380_poll_politely(hostdata, STATUS_REG,
 		                          SR_REQ, 0, HZ) < 0) {
 			result = -1;
 			shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
@@ -1694,7 +1698,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 	 * Wait for the last byte to be sent. If REQ is being asserted for
 	 * the byte we're interested, we'll ACK it and it will go false.
 	 */
-	if (NCR5380_poll_politely2(instance,
+	if (NCR5380_poll_politely2(hostdata,
 	                           BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
 	                           BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) {
 		result = -1;
@@ -1751,22 +1755,26 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 			NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
 		}
 #ifdef CONFIG_SUN3
-		if (phase == PHASE_CMDOUT) {
-			void *d;
-			unsigned long count;
+		if (phase == PHASE_CMDOUT &&
+		    sun3_dma_setup_done != cmd) {
+			int count;
 
 			if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-				count = cmd->SCp.buffer->length;
-				d = sg_virt(cmd->SCp.buffer);
-			} else {
-				count = cmd->SCp.this_residual;
-				d = cmd->SCp.ptr;
+				++cmd->SCp.buffer;
+				--cmd->SCp.buffers_residual;
+				cmd->SCp.this_residual = cmd->SCp.buffer->length;
+				cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 			}
 
-			if (sun3_dma_setup_done != cmd &&
-			    sun3scsi_dma_xfer_len(count, cmd) > 0) {
-				sun3scsi_dma_setup(instance, d, count,
-				                   rq_data_dir(cmd->request));
+			count = sun3scsi_dma_xfer_len(hostdata, cmd);
+
+			if (count > 0) {
+				if (rq_data_dir(cmd->request))
+					sun3scsi_dma_send_setup(hostdata,
+					                        cmd->SCp.ptr, count);
+				else
+					sun3scsi_dma_recv_setup(hostdata,
+					                        cmd->SCp.ptr, count);
 				sun3_dma_setup_done = cmd;
 			}
 #ifdef SUN3_SCSI_VME
@@ -1827,7 +1835,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 
 				transfersize = 0;
 				if (!cmd->device->borken)
-					transfersize = NCR5380_dma_xfer_len(instance, cmd, phase);
+					transfersize = NCR5380_dma_xfer_len(hostdata, cmd);
 
 				if (transfersize > 0) {
 					len = transfersize;
@@ -2073,7 +2081,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 			} /* switch(phase) */
 		} else {
 			spin_unlock_irq(&hostdata->lock);
-			NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+			NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
 			spin_lock_irq(&hostdata->lock);
 		}
 	}
@@ -2119,7 +2127,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	 */
 
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
-	if (NCR5380_poll_politely(instance,
+	if (NCR5380_poll_politely(hostdata,
 	                          STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 		return;
@@ -2130,7 +2138,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	 * Wait for target to go into MSGIN.
 	 */
 
-	if (NCR5380_poll_politely(instance,
+	if (NCR5380_poll_politely(hostdata,
 	                          STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
 		do_abort(instance);
 		return;
@@ -2204,22 +2212,25 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	}
 
 #ifdef CONFIG_SUN3
-	{
-		void *d;
-		unsigned long count;
+	if (sun3_dma_setup_done != tmp) {
+		int count;
 
 		if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
-			count = tmp->SCp.buffer->length;
-			d = sg_virt(tmp->SCp.buffer);
-		} else {
-			count = tmp->SCp.this_residual;
-			d = tmp->SCp.ptr;
+			++tmp->SCp.buffer;
+			--tmp->SCp.buffers_residual;
+			tmp->SCp.this_residual = tmp->SCp.buffer->length;
+			tmp->SCp.ptr = sg_virt(tmp->SCp.buffer);
 		}
 
-		if (sun3_dma_setup_done != tmp &&
-		    sun3scsi_dma_xfer_len(count, tmp) > 0) {
-			sun3scsi_dma_setup(instance, d, count,
-			                   rq_data_dir(tmp->request));
+		count = sun3scsi_dma_xfer_len(hostdata, tmp);
+
+		if (count > 0) {
+			if (rq_data_dir(tmp->request))
+				sun3scsi_dma_send_setup(hostdata,
+				                        tmp->SCp.ptr, count);
+			else
+				sun3scsi_dma_recv_setup(hostdata,
+				                        tmp->SCp.ptr, count);
 			sun3_dma_setup_done = tmp;
 		}
 	}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 965d92339455..3c6ce5434449 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -219,27 +219,32 @@
 #define FLAG_TOSHIBA_DELAY	128	/* Allow for borken CD-ROMs */
 
 struct NCR5380_hostdata {
-	NCR5380_implementation_fields;		/* implementation specific */
-	struct Scsi_Host *host;			/* Host backpointer */
-	unsigned char id_mask, id_higher_mask;	/* 1 << id, all bits greater */
-	unsigned char busy[8];			/* index = target, bit = lun */
-	int dma_len;				/* requested length of DMA */
-	unsigned char last_message;		/* last message OUT */
-	struct scsi_cmnd *connected;		/* currently connected cmnd */
-	struct scsi_cmnd *selecting;		/* cmnd to be connected */
-	struct list_head unissued;		/* waiting to be issued */
-	struct list_head autosense;		/* priority issue queue */
-	struct list_head disconnected;		/* waiting for reconnect */
-	spinlock_t lock;			/* protects this struct */
-	int flags;
-	struct scsi_eh_save ses;
-	struct scsi_cmnd *sensing;
+	NCR5380_implementation_fields;		/* Board-specific data */
+	u8 __iomem *io;				/* Remapped 5380 address */
+	u8 __iomem *pdma_io;			/* Remapped PDMA address */
+	unsigned long poll_loops;		/* Register polling limit */
+	spinlock_t lock;			/* Protects this struct */
+	struct scsi_cmnd *connected;		/* Currently connected cmnd */
+	struct list_head disconnected;		/* Waiting for reconnect */
+	struct Scsi_Host *host;			/* SCSI host backpointer */
+	struct workqueue_struct *work_q;	/* SCSI host work queue */
+	struct work_struct main_task;		/* Work item for main loop */
+	int flags;				/* Board-specific quirks */
+	int dma_len;				/* Requested length of DMA */
+	int read_overruns;			/* Transfer size reduction for DMA erratum */
+	unsigned long io_port;			/* Device IO port */
+	unsigned long base;			/* Device base address */
+	struct list_head unissued;		/* Waiting to be issued */
+	struct scsi_cmnd *selecting;		/* Cmnd to be connected */
+	struct list_head autosense;		/* Priority cmnd queue */
+	struct scsi_cmnd *sensing;		/* Cmnd needing autosense */
+	struct scsi_eh_save ses;		/* Cmnd state saved for EH */
+	unsigned char busy[8];			/* Index = target, bit = lun */
+	unsigned char id_mask;			/* 1 << Host ID */
+	unsigned char id_higher_mask;		/* All bits above id_mask */
+	unsigned char last_message;		/* Last Message Out */
+	unsigned long region_size;		/* Size of address/port range */
 	char info[256];
-	int read_overruns;	/* number of bytes to cut from a
-				 * transfer to handle chip overruns */
-	struct work_struct main_task;
-	struct workqueue_struct *work_q;
-	unsigned long accesses_per_ms;	/* chip register accesses per ms */
 };
 
 #ifdef __KERNEL__
@@ -252,6 +257,9 @@ struct NCR5380_cmd {
 
 #define NCR5380_PIO_CHUNK_SIZE		256
 
+/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
+#define NCR5380_REG_POLL_TIME		15
+
 static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
 {
 	return ((struct scsi_cmnd *)ncmd_ptr) - 1;
@@ -294,14 +302,45 @@ static void NCR5380_reselect(struct Scsi_Host *instance);
 static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
 static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
 static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
+                                  unsigned int, u8, u8,
+                                  unsigned int, u8, u8, unsigned long);
 
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
-                                        int reg, int bit, int val, int wait)
+static inline int NCR5380_poll_politely(struct NCR5380_hostdata *hostdata,
+                                        unsigned int reg, u8 bit, u8 val,
+                                        unsigned long wait)
 {
-	return NCR5380_poll_politely2(instance, reg, bit, val,
+	if ((NCR5380_read(reg) & bit) == val)
+		return 0;
+
+	return NCR5380_poll_politely2(hostdata, reg, bit, val,
 	                              reg, bit, val, wait);
 }
 
+static int NCR5380_dma_xfer_len(struct NCR5380_hostdata *,
+                                struct scsi_cmnd *);
+static int NCR5380_dma_send_setup(struct NCR5380_hostdata *,
+                                  unsigned char *, int);
+static int NCR5380_dma_recv_setup(struct NCR5380_hostdata *,
+                                  unsigned char *, int);
+static int NCR5380_dma_residual(struct NCR5380_hostdata *);
+
+static inline int NCR5380_dma_xfer_none(struct NCR5380_hostdata *hostdata,
+                                        struct scsi_cmnd *cmd)
+{
+	return 0;
+}
+
+static inline int NCR5380_dma_setup_none(struct NCR5380_hostdata *hostdata,
+                                         unsigned char *data, int count)
+{
+	return 0;
+}
+
+static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata)
+{
+	return 0;
+}
+
 #endif /* __KERNEL__ */
 #endif /* NCR5380_H */
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 969c312de1be..f059c14efa0c 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1246,7 +1246,6 @@ struct aac_dev
 	u32			max_msix;	/* max. MSI-X vectors */
 	u32			vector_cap;	/* MSI-X vector capab.*/
 	int			msi_enabled;	/* MSI/MSI-X enabled */
-	struct msix_entry	msixentry[AAC_MAX_MSIX];
 	struct aac_msix_ctx	aac_msix[AAC_MAX_MSIX]; /* context */
 	u8			adapter_shutdown;
 	u32			handle_pci_error;
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 341ea327ae79..4f56b1003cc7 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -378,16 +378,12 @@ void aac_define_int_mode(struct aac_dev *dev)
 	if (msi_count > AAC_MAX_MSIX)
 		msi_count = AAC_MAX_MSIX;
 
-	for (i = 0; i < msi_count; i++)
-		dev->msixentry[i].entry = i;
-
 	if (msi_count > 1 &&
 	    pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
 		min_msix = 2;
-		i = pci_enable_msix_range(dev->pdev,
-					  dev->msixentry,
-					  min_msix,
-					  msi_count);
+		i = pci_alloc_irq_vectors(dev->pdev,
+					  min_msix, msi_count,
+					  PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
 		if (i > 0) {
 			dev->msi_enabled = 1;
 			msi_count = i;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 0aeecec1f5ea..9e7551fe4b19 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2043,30 +2043,22 @@ int aac_acquire_irq(struct aac_dev *dev)
 	int i;
 	int j;
 	int ret = 0;
-	int cpu;
 
-	cpu = cpumask_first(cpu_online_mask);
 	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
 		for (i = 0; i < dev->max_msix; i++) {
 			dev->aac_msix[i].vector_no = i;
 			dev->aac_msix[i].dev = dev;
-			if (request_irq(dev->msixentry[i].vector,
+			if (request_irq(pci_irq_vector(dev->pdev, i),
 					dev->a_ops.adapter_intr,
 					0, "aacraid", &(dev->aac_msix[i]))) {
 				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
 					dev->name, dev->id, i);
 				for (j = 0 ; j < i ; j++)
-					free_irq(dev->msixentry[j].vector,
+					free_irq(pci_irq_vector(dev->pdev, j),
 						 &(dev->aac_msix[j]));
 				pci_disable_msix(dev->pdev);
 				ret = -1;
 			}
-			if (irq_set_affinity_hint(dev->msixentry[i].vector,
-						  get_cpu_mask(cpu))) {
-				printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
-					dev->name, dev->id, cpu);
-			}
-			cpu = cpumask_next(cpu, cpu_online_mask);
 		}
 	} else {
 		dev->aac_msix[0].vector_no = 0;
2071 } else { 2063 } else {
2072 dev->aac_msix[0].vector_no = 0; 2064 dev->aac_msix[0].vector_no = 0;
@@ -2096,16 +2088,9 @@ void aac_free_irq(struct aac_dev *dev)
2096 dev->pdev->device == PMC_DEVICE_S8 || 2088 dev->pdev->device == PMC_DEVICE_S8 ||
2097 dev->pdev->device == PMC_DEVICE_S9) { 2089 dev->pdev->device == PMC_DEVICE_S9) {
2098 if (dev->max_msix > 1) { 2090 if (dev->max_msix > 1) {
2099 for (i = 0; i < dev->max_msix; i++) { 2091 for (i = 0; i < dev->max_msix; i++)
2100 if (irq_set_affinity_hint( 2092 free_irq(pci_irq_vector(dev->pdev, i),
2101 dev->msixentry[i].vector, NULL)) { 2093 &(dev->aac_msix[i]));
2102 printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
2103 dev->name, dev->id, cpu);
2104 }
2105 cpu = cpumask_next(cpu, cpu_online_mask);
2106 free_irq(dev->msixentry[i].vector,
2107 &(dev->aac_msix[i]));
2108 }
2109 } else { 2094 } else {
2110 free_irq(dev->pdev->irq, &(dev->aac_msix[0])); 2095 free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
2111 } 2096 }
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 79871f3519ff..e4f3e22fcbd9 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1071,7 +1071,6 @@ static struct scsi_host_template aac_driver_template = {
 static void __aac_shutdown(struct aac_dev * aac)
 {
 	int i;
-	int cpu;
 
 	aac_send_shutdown(aac);
 
@@ -1087,24 +1086,13 @@ static void __aac_shutdown(struct aac_dev * aac)
 		kthread_stop(aac->thread);
 	}
 	aac_adapter_disable_int(aac);
-	cpu = cpumask_first(cpu_online_mask);
 	if (aac->pdev->device == PMC_DEVICE_S6 ||
 	    aac->pdev->device == PMC_DEVICE_S7 ||
 	    aac->pdev->device == PMC_DEVICE_S8 ||
 	    aac->pdev->device == PMC_DEVICE_S9) {
 		if (aac->max_msix > 1) {
 			for (i = 0; i < aac->max_msix; i++) {
-				if (irq_set_affinity_hint(
-					aac->msixentry[i].vector,
-					NULL)) {
-					printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
-						aac->name,
-						aac->id,
-						cpu);
-				}
-				cpu = cpumask_next(cpu,
-						cpu_online_mask);
-				free_irq(aac->msixentry[i].vector,
+				free_irq(pci_irq_vector(aac->pdev, i),
 					 &(aac->aac_msix[i]));
 			}
 		} else {
@@ -1350,7 +1338,7 @@ static void aac_release_resources(struct aac_dev *aac)
 	    aac->pdev->device == PMC_DEVICE_S9) {
 		if (aac->max_msix > 1) {
 			for (i = 0; i < aac->max_msix; i++)
-				free_irq(aac->msixentry[i].vector,
+				free_irq(pci_irq_vector(aac->pdev, i),
 					 &(aac->aac_msix[i]));
 		} else {
 			free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
@@ -1396,13 +1384,13 @@ static int aac_acquire_resources(struct aac_dev *dev)
 		dev->aac_msix[i].vector_no = i;
 		dev->aac_msix[i].dev = dev;
 
-		if (request_irq(dev->msixentry[i].vector,
+		if (request_irq(pci_irq_vector(dev->pdev, i),
 				dev->a_ops.adapter_intr,
 				0, "aacraid", &(dev->aac_msix[i]))) {
 			printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
 				name, instance, i);
 			for (j = 0 ; j < i ; j++)
-				free_irq(dev->msixentry[j].vector,
+				free_irq(pci_irq_vector(dev->pdev, j),
 					 &(dev->aac_msix[j]));
 			pci_disable_msix(dev->pdev);
 			goto error_iounmap;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index febbd83e2ecd..81dd0927246b 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -11030,6 +11030,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
 		ASC_DBG(2, "AdvInitGetConfig()\n");
 
 		ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
+#else
+		share_irq = 0;
+		ret = -ENODEV;
 #endif /* CONFIG_PCI */
 	}
 
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 7c713f797535..f2671a8fa7e3 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -228,8 +228,11 @@ static int asd_init_scbs(struct asd_ha_struct *asd_ha)
 	bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
 	bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
 	asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
-	if (!asd_ha->seq.tc_index_bitmap)
+	if (!asd_ha->seq.tc_index_bitmap) {
+		kfree(asd_ha->seq.tc_index_array);
+		asd_ha->seq.tc_index_array = NULL;
 		return -ENOMEM;
+	}
 
 	spin_lock_init(&seq->tc_index_lock);
 
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index cf99f8cf4cdd..a254b32eba39 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -629,7 +629,6 @@ struct AdapterControlBlock
 	struct pci_dev *		pdev;
 	struct Scsi_Host *		host;
 	unsigned long			vir2phy_offset;
-	struct msix_entry	entries[ARCMST_NUM_MSIX_VECTORS];
 	/* Offset is used in making arc cdb physical to virtual calculations */
 	uint32_t			outbound_int_enable;
 	uint32_t			cdb_phyaddr_hi32;
@@ -671,8 +670,6 @@ struct AdapterControlBlock
 	/* iop init */
 	#define ACB_F_ABORT			0x0200
 	#define ACB_F_FIRMWARE_TRAP		0x0400
-	#define ACB_F_MSI_ENABLED		0x1000
-	#define ACB_F_MSIX_ENABLED		0x2000
 	struct CommandControlBlock *	pccb_pool[ARCMSR_MAX_FREECCB_NUM];
 	/* used for memory free */
 	struct list_head		ccb_free_list;
@@ -725,7 +722,7 @@ struct AdapterControlBlock
 	atomic_t			rq_map_token;
 	atomic_t			ante_token_value;
 	uint32_t			maxOutstanding;
-	int				msix_vector_count;
+	int				vector_count;
 };/* HW_DEVICE_EXTENSION */
 /*
 *******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f0cfb0451757..9e45749d55ed 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -720,51 +720,39 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
 static int
 arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
 {
-	int	i, j, r;
-	struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
+	unsigned long flags;
+	int nvec, i;
 
-	for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
-		entries[i].entry = i;
-	r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
-	if (r < 0)
-		goto msi_int;
-	acb->msix_vector_count = r;
-	for (i = 0; i < r; i++) {
-		if (request_irq(entries[i].vector,
-			arcmsr_do_interrupt, 0, "arcmsr", acb)) {
+	nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
+			PCI_IRQ_MSIX);
+	if (nvec > 0) {
+		pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
+		flags = 0;
+	} else {
+		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
+				PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+		if (nvec < 1)
+			return FAILED;
+
+		flags = IRQF_SHARED;
+	}
+
+	acb->vector_count = nvec;
+	for (i = 0; i < nvec; i++) {
+		if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
+				flags, "arcmsr", acb)) {
 			pr_warn("arcmsr%d: request_irq =%d failed!\n",
-				acb->host->host_no,  entries[i].vector);
-			for (j = 0 ; j < i ; j++)
-				free_irq(entries[j].vector, acb);
-			pci_disable_msix(pdev);
-			goto msi_int;
+				acb->host->host_no, pci_irq_vector(pdev, i));
+			goto out_free_irq;
 		}
-		acb->entries[i] = entries[i];
-	}
-	acb->acb_flags |= ACB_F_MSIX_ENABLED;
-	pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
-	return SUCCESS;
-msi_int:
-	if (pci_enable_msi_exact(pdev, 1) < 0)
-		goto legacy_int;
-	if (request_irq(pdev->irq, arcmsr_do_interrupt,
-		IRQF_SHARED, "arcmsr", acb)) {
-		pr_warn("arcmsr%d: request_irq =%d failed!\n",
-			acb->host->host_no, pdev->irq);
-		pci_disable_msi(pdev);
-		goto legacy_int;
-	}
-	acb->acb_flags |= ACB_F_MSI_ENABLED;
-	pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
-	return SUCCESS;
-legacy_int:
-	if (request_irq(pdev->irq, arcmsr_do_interrupt,
-		IRQF_SHARED, "arcmsr", acb)) {
-		pr_warn("arcmsr%d: request_irq = %d failed!\n",
-			acb->host->host_no, pdev->irq);
-		return FAILED;
 	}
+
 	return SUCCESS;
+out_free_irq:
+	while (--i >= 0)
+		free_irq(pci_irq_vector(pdev, i), acb);
+	pci_free_irq_vectors(pdev);
+	return FAILED;
 }
 
 static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -886,15 +874,9 @@ static void arcmsr_free_irq(struct pci_dev *pdev,
 {
 	int i;
 
-	if (acb->acb_flags & ACB_F_MSI_ENABLED) {
-		free_irq(pdev->irq, acb);
-		pci_disable_msi(pdev);
-	} else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
-		for (i = 0; i < acb->msix_vector_count; i++)
-			free_irq(acb->entries[i].vector, acb);
-		pci_disable_msix(pdev);
-	} else
-		free_irq(pdev->irq, acb);
+	for (i = 0; i < acb->vector_count; i++)
+		free_irq(pci_irq_vector(pdev, i), acb);
+	pci_free_irq_vectors(pdev);
 }
 
 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 8e9cfe8f22f5..a87b99c7fb9a 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -14,49 +14,48 @@
 #include <scsi/scsi_host.h>
 
 #define priv(host)			((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg)		cumanascsi_read(instance, reg)
-#define NCR5380_write(reg, value)	cumanascsi_write(instance, reg, value)
+#define NCR5380_read(reg)		cumanascsi_read(hostdata, reg)
+#define NCR5380_write(reg, value)	cumanascsi_write(hostdata, reg, value)
 
-#define NCR5380_dma_xfer_len(instance, cmd, phase)	(cmd->transfersize)
+#define NCR5380_dma_xfer_len		cumanascsi_dma_xfer_len
 #define NCR5380_dma_recv_setup		cumanascsi_pread
 #define NCR5380_dma_send_setup		cumanascsi_pwrite
-#define NCR5380_dma_residual(instance)	(0)
+#define NCR5380_dma_residual		NCR5380_dma_residual_none
 
 #define NCR5380_intr			cumanascsi_intr
 #define NCR5380_queue_command		cumanascsi_queue_command
 #define NCR5380_info			cumanascsi_info
 
 #define NCR5380_implementation_fields	\
-	unsigned ctrl; \
-	void __iomem *base; \
-	void __iomem *dma
+	unsigned ctrl
 
-#include "../NCR5380.h"
+struct NCR5380_hostdata;
+static u8 cumanascsi_read(struct NCR5380_hostdata *, unsigned int);
+static void cumanascsi_write(struct NCR5380_hostdata *, unsigned int, u8);
 
-void cumanascsi_setup(char *str, int *ints)
-{
-}
+#include "../NCR5380.h"
 
 #define CTRL	0x16fc
 #define STAT	0x2004
 #define L(v)	(((v)<<16)|((v) & 0x0000ffff))
 #define H(v)	(((v)>>16)|((v) & 0xffff0000))
 
-static inline int cumanascsi_pwrite(struct Scsi_Host *host,
+static inline int cumanascsi_pwrite(struct NCR5380_hostdata *hostdata,
                                     unsigned char *addr, int len)
 {
 	unsigned long *laddr;
-	void __iomem *dma = priv(host)->dma + 0x2000;
+	u8 __iomem *base = hostdata->io;
+	u8 __iomem *dma = hostdata->pdma_io + 0x2000;
 
 	if(!len) return 0;
 
-	writeb(0x02, priv(host)->base + CTRL);
+	writeb(0x02, base + CTRL);
 	laddr = (unsigned long *)addr;
 	while(len >= 32)
 	{
 		unsigned int status;
 		unsigned long v;
-		status = readb(priv(host)->base + STAT);
+		status = readb(base + STAT);
 		if(status & 0x80)
 			goto end;
 		if(!(status & 0x40))
@@ -75,12 +74,12 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
 	}
 
 	addr = (unsigned char *)laddr;
-	writeb(0x12, priv(host)->base + CTRL);
+	writeb(0x12, base + CTRL);
 
 	while(len > 0)
 	{
 		unsigned int status;
-		status = readb(priv(host)->base + STAT);
+		status = readb(base + STAT);
 		if(status & 0x80)
 			goto end;
 		if(status & 0x40)
@@ -90,7 +89,7 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
 				break;
 		}
 
-		status = readb(priv(host)->base + STAT);
+		status = readb(base + STAT);
 		if(status & 0x80)
 			goto end;
 		if(status & 0x40)
@@ -101,27 +100,28 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
 	}
 }
 end:
-	writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+	writeb(hostdata->ctrl | 0x40, base + CTRL);
 
 	if (len)
 		return -1;
 	return 0;
 }
 
-static inline int cumanascsi_pread(struct Scsi_Host *host,
+static inline int cumanascsi_pread(struct NCR5380_hostdata *hostdata,
                                    unsigned char *addr, int len)
 {
 	unsigned long *laddr;
-	void __iomem *dma = priv(host)->dma + 0x2000;
+	u8 __iomem *base = hostdata->io;
+	u8 __iomem *dma = hostdata->pdma_io + 0x2000;
 
 	if(!len) return 0;
 
-	writeb(0x00, priv(host)->base + CTRL);
+	writeb(0x00, base + CTRL);
 	laddr = (unsigned long *)addr;
 	while(len >= 32)
 	{
 		unsigned int status;
-		status = readb(priv(host)->base + STAT);
+		status = readb(base + STAT);
 		if(status & 0x80)
 			goto end;
 		if(!(status & 0x40))
@@ -140,12 +140,12 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
 	}
 
 	addr = (unsigned char *)laddr;
-	writeb(0x10, priv(host)->base + CTRL);
+	writeb(0x10, base + CTRL);
 
 	while(len > 0)
 	{
 		unsigned int status;
-		status = readb(priv(host)->base + STAT);
+		status = readb(base + STAT);
 		if(status & 0x80)
 			goto end;
 		if(status & 0x40)
@@ -155,7 +155,7 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
 				break;
 		}
 
-		status = readb(priv(host)->base + STAT);
+		status = readb(base + STAT);
 		if(status & 0x80)
 			goto end;
 		if(status & 0x40)
@@ -166,37 +166,45 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
 	}
 }
 end:
-	writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+	writeb(hostdata->ctrl | 0x40, base + CTRL);
 
 	if (len)
 		return -1;
 	return 0;
 }
 
-static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
+static int cumanascsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+                                   struct scsi_cmnd *cmd)
+{
+	return cmd->transfersize;
+}
+
+static u8 cumanascsi_read(struct NCR5380_hostdata *hostdata,
+                          unsigned int reg)
 {
-	void __iomem *base = priv(host)->base;
-	unsigned char val;
+	u8 __iomem *base = hostdata->io;
+	u8 val;
 
 	writeb(0, base + CTRL);
 
 	val = readb(base + 0x2100 + (reg << 2));
 
-	priv(host)->ctrl = 0x40;
+	hostdata->ctrl = 0x40;
 	writeb(0x40, base + CTRL);
 
 	return val;
 }
 
-static void cumanascsi_write(struct Scsi_Host *host, unsigned int reg, unsigned int value)
+static void cumanascsi_write(struct NCR5380_hostdata *hostdata,
+                             unsigned int reg, u8 value)
 {
-	void __iomem *base = priv(host)->base;
+	u8 __iomem *base = hostdata->io;
 
 	writeb(0, base + CTRL);
 
 	writeb(value, base + 0x2100 + (reg << 2));
 
-	priv(host)->ctrl = 0x40;
+	hostdata->ctrl = 0x40;
 	writeb(0x40, base + CTRL);
 }
 
@@ -235,11 +243,11 @@ static int cumanascsi1_probe(struct expansion_card *ec,
235 goto out_release; 243 goto out_release;
236 } 244 }
237 245
238 priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW), 246 priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
239 ecard_resource_len(ec, ECARD_RES_IOCSLOW)); 247 ecard_resource_len(ec, ECARD_RES_IOCSLOW));
240 priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), 248 priv(host)->pdma_io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
241 ecard_resource_len(ec, ECARD_RES_MEMC)); 249 ecard_resource_len(ec, ECARD_RES_MEMC));
242 if (!priv(host)->base || !priv(host)->dma) { 250 if (!priv(host)->io || !priv(host)->pdma_io) {
243 ret = -ENOMEM; 251 ret = -ENOMEM;
244 goto out_unmap; 252 goto out_unmap;
245 } 253 }
@@ -253,7 +261,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
253 NCR5380_maybe_reset_bus(host); 261 NCR5380_maybe_reset_bus(host);
254 262
255 priv(host)->ctrl = 0; 263 priv(host)->ctrl = 0;
256 writeb(0, priv(host)->base + CTRL); 264 writeb(0, priv(host)->io + CTRL);
257 265
258 ret = request_irq(host->irq, cumanascsi_intr, 0, 266 ret = request_irq(host->irq, cumanascsi_intr, 0,
259 "CumanaSCSI-1", host); 267 "CumanaSCSI-1", host);
@@ -275,8 +283,8 @@ static int cumanascsi1_probe(struct expansion_card *ec,
275 out_exit: 283 out_exit:
276 NCR5380_exit(host); 284 NCR5380_exit(host);
277 out_unmap: 285 out_unmap:
278 iounmap(priv(host)->base); 286 iounmap(priv(host)->io);
279 iounmap(priv(host)->dma); 287 iounmap(priv(host)->pdma_io);
280 scsi_host_put(host); 288 scsi_host_put(host);
281 out_release: 289 out_release:
282 ecard_release_resources(ec); 290 ecard_release_resources(ec);
@@ -287,15 +295,17 @@ static int cumanascsi1_probe(struct expansion_card *ec,
287static void cumanascsi1_remove(struct expansion_card *ec) 295static void cumanascsi1_remove(struct expansion_card *ec)
288{ 296{
289 struct Scsi_Host *host = ecard_get_drvdata(ec); 297 struct Scsi_Host *host = ecard_get_drvdata(ec);
298 void __iomem *base = priv(host)->io;
299 void __iomem *dma = priv(host)->pdma_io;
290 300
291 ecard_set_drvdata(ec, NULL); 301 ecard_set_drvdata(ec, NULL);
292 302
293 scsi_remove_host(host); 303 scsi_remove_host(host);
294 free_irq(host->irq, host); 304 free_irq(host->irq, host);
295 NCR5380_exit(host); 305 NCR5380_exit(host);
296 iounmap(priv(host)->base);
297 iounmap(priv(host)->dma);
298 scsi_host_put(host); 306 scsi_host_put(host);
307 iounmap(base);
308 iounmap(dma);
299 ecard_release_resources(ec); 309 ecard_release_resources(ec);
300} 310}
301 311
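
The cumana_1.c hunks above track an NCR5380 core API change: board-specific accessors and pseudo-DMA routines now receive the struct NCR5380_hostdata pointer directly instead of re-deriving it from a struct Scsi_Host through priv(), and the mapped windows move to the generic hostdata fields io and pdma_io. A minimal sketch of the resulting accessor shape, with simplified stand-in types (the real struct lives in drivers/scsi/NCR5380.h and carries much more state):

    #include <stdint.h>

    struct ncr5380_hostdata {
        volatile uint8_t *io;       /* ioremapped register window */
        volatile uint8_t *pdma_io;  /* ioremapped pseudo-DMA window */
        unsigned int ctrl;          /* cached board CTRL value */
    };

    /* Hooks get hostdata directly: no priv(host) hop on every access. */
    static uint8_t board_reg_read(struct ncr5380_hostdata *hostdata,
                                  unsigned int reg)
    {
        return hostdata->io[0x2100 + (reg << 2)];
    }

Note also the reordering in cumanascsi1_remove(): the iounmap() targets are saved up front and unmapped only after scsi_host_put(), since priv(host) may no longer be valid once the last host reference is dropped.
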
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index a396024a3cae..6be6666534d4 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -16,21 +16,18 @@
16 16
17#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) 17#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
18 18
19#define NCR5380_read(reg) \ 19#define NCR5380_read(reg) readb(hostdata->io + ((reg) << 2))
20 readb(priv(instance)->base + ((reg) << 2)) 20#define NCR5380_write(reg, value) writeb(value, hostdata->io + ((reg) << 2))
21#define NCR5380_write(reg, value) \
22 writeb(value, priv(instance)->base + ((reg) << 2))
23 21
24#define NCR5380_dma_xfer_len(instance, cmd, phase) (0) 22#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none
25#define NCR5380_dma_recv_setup oakscsi_pread 23#define NCR5380_dma_recv_setup oakscsi_pread
26#define NCR5380_dma_send_setup oakscsi_pwrite 24#define NCR5380_dma_send_setup oakscsi_pwrite
27#define NCR5380_dma_residual(instance) (0) 25#define NCR5380_dma_residual NCR5380_dma_residual_none
28 26
29#define NCR5380_queue_command oakscsi_queue_command 27#define NCR5380_queue_command oakscsi_queue_command
30#define NCR5380_info oakscsi_info 28#define NCR5380_info oakscsi_info
31 29
32#define NCR5380_implementation_fields \ 30#define NCR5380_implementation_fields /* none */
33 void __iomem *base
34 31
35#include "../NCR5380.h" 32#include "../NCR5380.h"
36 33
@@ -40,10 +37,10 @@
40#define STAT ((128 + 16) << 2) 37#define STAT ((128 + 16) << 2)
41#define DATA ((128 + 8) << 2) 38#define DATA ((128 + 8) << 2)
42 39
43static inline int oakscsi_pwrite(struct Scsi_Host *instance, 40static inline int oakscsi_pwrite(struct NCR5380_hostdata *hostdata,
44 unsigned char *addr, int len) 41 unsigned char *addr, int len)
45{ 42{
46 void __iomem *base = priv(instance)->base; 43 u8 __iomem *base = hostdata->io;
47 44
48printk("writing %p len %d\n",addr, len); 45printk("writing %p len %d\n",addr, len);
49 46
@@ -55,10 +52,11 @@ printk("writing %p len %d\n",addr, len);
55 return 0; 52 return 0;
56} 53}
57 54
58static inline int oakscsi_pread(struct Scsi_Host *instance, 55static inline int oakscsi_pread(struct NCR5380_hostdata *hostdata,
59 unsigned char *addr, int len) 56 unsigned char *addr, int len)
60{ 57{
61 void __iomem *base = priv(instance)->base; 58 u8 __iomem *base = hostdata->io;
59
62printk("reading %p len %d\n", addr, len); 60printk("reading %p len %d\n", addr, len);
63 while(len > 0) 61 while(len > 0)
64 { 62 {
@@ -133,15 +131,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
133 goto release; 131 goto release;
134 } 132 }
135 133
136 priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), 134 priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
137 ecard_resource_len(ec, ECARD_RES_MEMC)); 135 ecard_resource_len(ec, ECARD_RES_MEMC));
138 if (!priv(host)->base) { 136 if (!priv(host)->io) {
139 ret = -ENOMEM; 137 ret = -ENOMEM;
140 goto unreg; 138 goto unreg;
141 } 139 }
142 140
143 host->irq = NO_IRQ; 141 host->irq = NO_IRQ;
144 host->n_io_port = 255;
145 142
146 ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); 143 ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP);
147 if (ret) 144 if (ret)
@@ -159,7 +156,7 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
159 out_exit: 156 out_exit:
160 NCR5380_exit(host); 157 NCR5380_exit(host);
161 out_unmap: 158 out_unmap:
162 iounmap(priv(host)->base); 159 iounmap(priv(host)->io);
163 unreg: 160 unreg:
164 scsi_host_put(host); 161 scsi_host_put(host);
165 release: 162 release:
@@ -171,13 +168,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
171static void oakscsi_remove(struct expansion_card *ec) 168static void oakscsi_remove(struct expansion_card *ec)
172{ 169{
173 struct Scsi_Host *host = ecard_get_drvdata(ec); 170 struct Scsi_Host *host = ecard_get_drvdata(ec);
171 void __iomem *base = priv(host)->io;
174 172
175 ecard_set_drvdata(ec, NULL); 173 ecard_set_drvdata(ec, NULL);
176 scsi_remove_host(host); 174 scsi_remove_host(host);
177 175
178 NCR5380_exit(host); 176 NCR5380_exit(host);
179 iounmap(priv(host)->base);
180 scsi_host_put(host); 177 scsi_host_put(host);
178 iounmap(base);
181 ecard_release_resources(ec); 179 ecard_release_resources(ec);
182} 180}
183 181
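
The same conversion lets oak.c drop its private base field entirely (NCR5380_implementation_fields is now empty) and replace the open-coded (0) macros with common stubs the core supplies for boards that never do DMA. A plausible shape of those stubs, hedged because the authoritative definitions live in drivers/scsi/NCR5380.h:

    struct NCR5380_hostdata;   /* opaque here; defined in NCR5380.h */
    struct scsi_cmnd;

    static inline int NCR5380_dma_xfer_none(struct NCR5380_hostdata *hostdata,
                                            struct scsi_cmnd *cmd)
    {
        return 0;   /* request no DMA; the core stays on PIO */
    }

    static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata)
    {
        return 0;   /* no DMA ever runs, so nothing is left over */
    }

The stale host->n_io_port assignment goes away with the rest of the per-board bookkeeping.
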
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a59ad94ea52b..105b35393ce9 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -57,6 +57,9 @@
57 57
58#define NCR5380_implementation_fields /* none */ 58#define NCR5380_implementation_fields /* none */
59 59
60static u8 (*atari_scsi_reg_read)(unsigned int);
61static void (*atari_scsi_reg_write)(unsigned int, u8);
62
60#define NCR5380_read(reg) atari_scsi_reg_read(reg) 63#define NCR5380_read(reg) atari_scsi_reg_read(reg)
61#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value) 64#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value)
62 65
@@ -64,14 +67,10 @@
64#define NCR5380_abort atari_scsi_abort 67#define NCR5380_abort atari_scsi_abort
65#define NCR5380_info atari_scsi_info 68#define NCR5380_info atari_scsi_info
66 69
67#define NCR5380_dma_recv_setup(instance, data, count) \ 70#define NCR5380_dma_xfer_len atari_scsi_dma_xfer_len
68 atari_scsi_dma_setup(instance, data, count, 0) 71#define NCR5380_dma_recv_setup atari_scsi_dma_recv_setup
69#define NCR5380_dma_send_setup(instance, data, count) \ 72#define NCR5380_dma_send_setup atari_scsi_dma_send_setup
70 atari_scsi_dma_setup(instance, data, count, 1) 73#define NCR5380_dma_residual atari_scsi_dma_residual
71#define NCR5380_dma_residual(instance) \
72 atari_scsi_dma_residual(instance)
73#define NCR5380_dma_xfer_len(instance, cmd, phase) \
74 atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
75 74
76#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance) 75#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance)
77#define NCR5380_release_dma_irq(instance) falcon_release_lock() 76#define NCR5380_release_dma_irq(instance) falcon_release_lock()
@@ -126,9 +125,6 @@ static inline unsigned long SCSI_DMA_GETADR(void)
126 125
127static void atari_scsi_fetch_restbytes(void); 126static void atari_scsi_fetch_restbytes(void);
128 127
129static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
130static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value);
131
132static unsigned long atari_dma_residual, atari_dma_startaddr; 128static unsigned long atari_dma_residual, atari_dma_startaddr;
133static short atari_dma_active; 129static short atari_dma_active;
134/* pointer to the dribble buffer */ 130/* pointer to the dribble buffer */
@@ -457,15 +453,14 @@ static int __init atari_scsi_setup(char *str)
457__setup("atascsi=", atari_scsi_setup); 453__setup("atascsi=", atari_scsi_setup);
458#endif /* !MODULE */ 454#endif /* !MODULE */
459 455
460 456static unsigned long atari_scsi_dma_setup(struct NCR5380_hostdata *hostdata,
461static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
462 void *data, unsigned long count, 457 void *data, unsigned long count,
463 int dir) 458 int dir)
464{ 459{
465 unsigned long addr = virt_to_phys(data); 460 unsigned long addr = virt_to_phys(data);
466 461
467 dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " 462 dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, dir = %d\n",
468 "dir = %d\n", instance->host_no, data, addr, count, dir); 463 hostdata->host->host_no, data, addr, count, dir);
469 464
470 if (!IS_A_TT() && !STRAM_ADDR(addr)) { 465 if (!IS_A_TT() && !STRAM_ADDR(addr)) {
471 /* If we have a non-DMAable address on a Falcon, use the dribble 466 /* If we have a non-DMAable address on a Falcon, use the dribble
@@ -522,8 +517,19 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
522 return count; 517 return count;
523} 518}
524 519
520static inline int atari_scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
521 unsigned char *data, int count)
522{
523 return atari_scsi_dma_setup(hostdata, data, count, 0);
524}
525
526static inline int atari_scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
527 unsigned char *data, int count)
528{
529 return atari_scsi_dma_setup(hostdata, data, count, 1);
530}
525 531
526static long atari_scsi_dma_residual(struct Scsi_Host *instance) 532static int atari_scsi_dma_residual(struct NCR5380_hostdata *hostdata)
527{ 533{
528 return atari_dma_residual; 534 return atari_dma_residual;
529} 535}
@@ -564,10 +570,11 @@ static int falcon_classify_cmd(struct scsi_cmnd *cmd)
564 * the overrun problem, so this question is academic :-) 570 * the overrun problem, so this question is academic :-)
565 */ 571 */
566 572
567static unsigned long atari_dma_xfer_len(unsigned long wanted_len, 573static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
568 struct scsi_cmnd *cmd, int write_flag) 574 struct scsi_cmnd *cmd)
569{ 575{
570 unsigned long possible_len, limit; 576 int wanted_len = cmd->SCp.this_residual;
577 int possible_len, limit;
571 578
572 if (wanted_len < DMA_MIN_SIZE) 579 if (wanted_len < DMA_MIN_SIZE)
573 return 0; 580 return 0;
@@ -604,7 +611,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
604 * use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes. 611 * use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes.
605 */ 612 */
606 613
607 if (write_flag) { 614 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
608 /* Write operation can always use the DMA, but the transfer size must 615 /* Write operation can always use the DMA, but the transfer size must
609 * be rounded up to the next multiple of 512 (atari_dma_setup() does 616 * be rounded up to the next multiple of 512 (atari_dma_setup() does
610 * this). 617 * this).
@@ -644,8 +651,8 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
644 possible_len = limit; 651 possible_len = limit;
645 652
646 if (possible_len != wanted_len) 653 if (possible_len != wanted_len)
647 dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes " 654 dprintk(NDEBUG_DMA, "DMA transfer now %d bytes instead of %d\n",
648 "instead of %ld\n", possible_len, wanted_len); 655 possible_len, wanted_len);
649 656
650 return possible_len; 657 return possible_len;
651} 658}
@@ -658,26 +665,38 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
658 * NCR5380_write call these functions via function pointers. 665 * NCR5380_write call these functions via function pointers.
659 */ 666 */
660 667
661static unsigned char atari_scsi_tt_reg_read(unsigned char reg) 668static u8 atari_scsi_tt_reg_read(unsigned int reg)
662{ 669{
663 return tt_scsi_regp[reg * 2]; 670 return tt_scsi_regp[reg * 2];
664} 671}
665 672
666static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value) 673static void atari_scsi_tt_reg_write(unsigned int reg, u8 value)
667{ 674{
668 tt_scsi_regp[reg * 2] = value; 675 tt_scsi_regp[reg * 2] = value;
669} 676}
670 677
671static unsigned char atari_scsi_falcon_reg_read(unsigned char reg) 678static u8 atari_scsi_falcon_reg_read(unsigned int reg)
672{ 679{
673 dma_wd.dma_mode_status= (u_short)(0x88 + reg); 680 unsigned long flags;
674 return (u_char)dma_wd.fdc_acces_seccount; 681 u8 result;
682
683 reg += 0x88;
684 local_irq_save(flags);
685 dma_wd.dma_mode_status = (u_short)reg;
686 result = (u8)dma_wd.fdc_acces_seccount;
687 local_irq_restore(flags);
688 return result;
675} 689}
676 690
677static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value) 691static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value)
678{ 692{
679 dma_wd.dma_mode_status = (u_short)(0x88 + reg); 693 unsigned long flags;
694
695 reg += 0x88;
696 local_irq_save(flags);
697 dma_wd.dma_mode_status = (u_short)reg;
680 dma_wd.fdc_acces_seccount = (u_short)value; 698 dma_wd.fdc_acces_seccount = (u_short)value;
699 local_irq_restore(flags);
681} 700}
682 701
683 702
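
Two things happen in atari_scsi.c: the reg_read/reg_write function pointers move above the NCR5380_read/NCR5380_write macros (so the macros expand against declared identifiers) and gain u8/unsigned int prototypes, and the Falcon accessors become interrupt-safe. The Falcon 5380 is reached indirectly, by writing a register index to dma_wd.dma_mode_status and then touching dma_wd.fdc_acces_seccount; an interrupt landing between those two steps could re-select a different register, so each pair is now bracketed with local_irq_save()/local_irq_restore(). A generic model of the guarded sequence (names are illustrative, not a real driver API):

    #include <linux/irqflags.h>
    #include <linux/types.h>

    static u8 indexed_reg_read(volatile u16 *select, volatile u16 *data,
                               unsigned int reg)
    {
        unsigned long flags;
        u8 val;

        local_irq_save(flags);          /* close the race window */
        *select = (u16)(0x88 + reg);    /* step 1: latch the index */
        val = (u8)*data;                /* step 2: access that register */
        local_irq_restore(flags);
        return val;
    }
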
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index d9239c2d49b1..b5112d6d7e73 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3049,8 +3049,10 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
3049 eq_vaddress = pci_alloc_consistent(phba->pcidev, 3049 eq_vaddress = pci_alloc_consistent(phba->pcidev,
3050 num_eq_pages * PAGE_SIZE, 3050 num_eq_pages * PAGE_SIZE,
3051 &paddr); 3051 &paddr);
3052 if (!eq_vaddress) 3052 if (!eq_vaddress) {
3053 ret = -ENOMEM;
3053 goto create_eq_error; 3054 goto create_eq_error;
3055 }
3054 3056
3055 mem->va = eq_vaddress; 3057 mem->va = eq_vaddress;
3056 ret = be_fill_queue(eq, phba->params.num_eq_entries, 3058 ret = be_fill_queue(eq, phba->params.num_eq_entries,
@@ -3113,8 +3115,10 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3113 cq_vaddress = pci_alloc_consistent(phba->pcidev, 3115 cq_vaddress = pci_alloc_consistent(phba->pcidev,
3114 num_cq_pages * PAGE_SIZE, 3116 num_cq_pages * PAGE_SIZE,
3115 &paddr); 3117 &paddr);
3116 if (!cq_vaddress) 3118 if (!cq_vaddress) {
3119 ret = -ENOMEM;
3117 goto create_cq_error; 3120 goto create_cq_error;
3121 }
3118 3122
3119 ret = be_fill_queue(cq, phba->params.num_cq_entries, 3123 ret = be_fill_queue(cq, phba->params.num_cq_entries,
3120 sizeof(struct sol_cqe), cq_vaddress); 3124 sizeof(struct sol_cqe), cq_vaddress);
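
Both be2iscsi hunks fix the same error-path bug: on pci_alloc_consistent() failure the code jumped to its cleanup label with ret still holding the last (successful) value, so the caller saw success for a failed allocation. A stand-alone illustration of the bug class, with malloc() standing in for the DMA allocator:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int create_queue(size_t bytes)
    {
        int ret = 0;
        void *va = malloc(bytes);   /* ~pci_alloc_consistent() */

        if (!va) {
            ret = -ENOMEM;          /* the assignment these hunks add */
            goto create_error;
        }
        free(va);
        return 0;

    create_error:
        return ret;     /* without the assignment this returned 0 */
    }

    int main(void)
    {
        printf("rc=%d\n", create_queue(1u << 20));
        return 0;
    }
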
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 713745da44c6..0f9fab770339 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -111,20 +111,24 @@ struct bfa_meminfo_s {
111 struct bfa_mem_kva_s kva_info; 111 struct bfa_mem_kva_s kva_info;
112}; 112};
113 113
114/* BFA memory segment setup macros */ 114/* BFA memory segment setup helpers */
115#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \ 115static inline void bfa_mem_dma_setup(struct bfa_meminfo_s *meminfo,
116 ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \ 116 struct bfa_mem_dma_s *dm_ptr,
117 if (_seg_sz) \ 117 size_t seg_sz)
118 list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \ 118{
119 &(_meminfo)->dma_info.qe); \ 119 dm_ptr->mem_len = seg_sz;
120} while (0) 120 if (seg_sz)
121 list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe);
122}
121 123
122#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \ 124static inline void bfa_mem_kva_setup(struct bfa_meminfo_s *meminfo,
123 ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \ 125 struct bfa_mem_kva_s *kva_ptr,
124 if (_seg_sz) \ 126 size_t seg_sz)
125 list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \ 127{
126 &(_meminfo)->kva_info.qe); \ 128 kva_ptr->mem_len = seg_sz;
127} while (0) 129 if (seg_sz)
130 list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe);
131}
128 132
129/* BFA dma memory segments iterator */ 133/* BFA dma memory segments iterator */
130#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)]) 134#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)])
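
Rewriting the bfa_mem_dma_setup/bfa_mem_kva_setup macros as static inline functions buys type safety: the old macros cast whatever pointer they were handed, while the helpers only accept the proper struct, so a mismatched argument becomes a compile-time diagnostic, and the size argument is evaluated once rather than twice. Sketch of the difference with simplified stand-in types:

    #include <stddef.h>

    struct seg { size_t mem_len; int queued; };   /* ~bfa_mem_dma_s */

    /* Old style: unchecked cast, _sz evaluated twice. */
    #define SEG_SETUP(_p, _sz) do {                     \
        ((struct seg *)(_p))->mem_len = (_sz);          \
        if (_sz)                                        \
            ((struct seg *)(_p))->queued = 1;           \
    } while (0)

    /* New style: pointer type checked, sz evaluated exactly once. */
    static inline void seg_setup(struct seg *p, size_t sz)
    {
        p->mem_len = sz;
        if (sz)
            p->queued = 1;
    }
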
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index d1ad0208dfe7..a9a00169ad91 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3130,11 +3130,12 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
3130} 3130}
3131 3131
3132static int 3132static int
3133bfad_im_bsg_vendor_request(struct fc_bsg_job *job) 3133bfad_im_bsg_vendor_request(struct bsg_job *job)
3134{ 3134{
3135 uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0]; 3135 struct fc_bsg_request *bsg_request = job->request;
3136 struct bfad_im_port_s *im_port = 3136 struct fc_bsg_reply *bsg_reply = job->reply;
3137 (struct bfad_im_port_s *) job->shost->hostdata[0]; 3137 uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
3138 struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
3138 struct bfad_s *bfad = im_port->bfad; 3139 struct bfad_s *bfad = im_port->bfad;
3139 struct request_queue *request_q = job->req->q; 3140 struct request_queue *request_q = job->req->q;
3140 void *payload_kbuf; 3141 void *payload_kbuf;
@@ -3175,18 +3176,19 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
3175 3176
3176 /* Fill the BSG job reply data */ 3177 /* Fill the BSG job reply data */
3177 job->reply_len = job->reply_payload.payload_len; 3178 job->reply_len = job->reply_payload.payload_len;
3178 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; 3179 bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
3179 job->reply->result = rc; 3180 bsg_reply->result = rc;
3180 3181
3181 job->job_done(job); 3182 bsg_job_done(job, bsg_reply->result,
3183 bsg_reply->reply_payload_rcv_len);
3182 return rc; 3184 return rc;
3183error: 3185error:
3184 /* free the command buffer */ 3186 /* free the command buffer */
3185 kfree(payload_kbuf); 3187 kfree(payload_kbuf);
3186out: 3188out:
3187 job->reply->result = rc; 3189 bsg_reply->result = rc;
3188 job->reply_len = sizeof(uint32_t); 3190 job->reply_len = sizeof(uint32_t);
3189 job->reply->reply_payload_rcv_len = 0; 3191 bsg_reply->reply_payload_rcv_len = 0;
3190 return rc; 3192 return rc;
3191} 3193}
3192 3194
@@ -3312,7 +3314,7 @@ bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
3312} 3314}
3313 3315
3314int 3316int
3315bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp, 3317bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
3316 bfa_bsg_fcpt_t *bsg_fcpt) 3318 bfa_bsg_fcpt_t *bsg_fcpt)
3317{ 3319{
3318 struct bfa_fcxp_s *hal_fcxp; 3320 struct bfa_fcxp_s *hal_fcxp;
@@ -3352,28 +3354,29 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
3352} 3354}
3353 3355
3354int 3356int
3355bfad_im_bsg_els_ct_request(struct fc_bsg_job *job) 3357bfad_im_bsg_els_ct_request(struct bsg_job *job)
3356{ 3358{
3357 struct bfa_bsg_data *bsg_data; 3359 struct bfa_bsg_data *bsg_data;
3358 struct bfad_im_port_s *im_port = 3360 struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
3359 (struct bfad_im_port_s *) job->shost->hostdata[0];
3360 struct bfad_s *bfad = im_port->bfad; 3361 struct bfad_s *bfad = im_port->bfad;
3361 bfa_bsg_fcpt_t *bsg_fcpt; 3362 bfa_bsg_fcpt_t *bsg_fcpt;
3362 struct bfad_fcxp *drv_fcxp; 3363 struct bfad_fcxp *drv_fcxp;
3363 struct bfa_fcs_lport_s *fcs_port; 3364 struct bfa_fcs_lport_s *fcs_port;
3364 struct bfa_fcs_rport_s *fcs_rport; 3365 struct bfa_fcs_rport_s *fcs_rport;
3365 uint32_t command_type = job->request->msgcode; 3366 struct fc_bsg_request *bsg_request = job->request;
3367 struct fc_bsg_reply *bsg_reply = job->reply;
3368 uint32_t command_type = bsg_request->msgcode;
3366 unsigned long flags; 3369 unsigned long flags;
3367 struct bfad_buf_info *rsp_buf_info; 3370 struct bfad_buf_info *rsp_buf_info;
3368 void *req_kbuf = NULL, *rsp_kbuf = NULL; 3371 void *req_kbuf = NULL, *rsp_kbuf = NULL;
3369 int rc = -EINVAL; 3372 int rc = -EINVAL;
3370 3373
3371 job->reply_len = sizeof(uint32_t); /* At least uint32_t reply_len */ 3374 job->reply_len = sizeof(uint32_t); /* At least uint32_t reply_len */
3372 job->reply->reply_payload_rcv_len = 0; 3375 bsg_reply->reply_payload_rcv_len = 0;
3373 3376
3374 /* Get the payload passed in from userspace */ 3377 /* Get the payload passed in from userspace */
3375 bsg_data = (struct bfa_bsg_data *) (((char *)job->request) + 3378 bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
3376 sizeof(struct fc_bsg_request)); 3379 sizeof(struct fc_bsg_request));
3377 if (bsg_data == NULL) 3380 if (bsg_data == NULL)
3378 goto out; 3381 goto out;
3379 3382
@@ -3517,13 +3520,13 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
3517 /* fill the job->reply data */ 3520 /* fill the job->reply data */
3518 if (drv_fcxp->req_status == BFA_STATUS_OK) { 3521 if (drv_fcxp->req_status == BFA_STATUS_OK) {
3519 job->reply_len = drv_fcxp->rsp_len; 3522 job->reply_len = drv_fcxp->rsp_len;
3520 job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len; 3523 bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
3521 job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 3524 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
3522 } else { 3525 } else {
3523 job->reply->reply_payload_rcv_len = 3526 bsg_reply->reply_payload_rcv_len =
3524 sizeof(struct fc_bsg_ctels_reply); 3527 sizeof(struct fc_bsg_ctels_reply);
3525 job->reply_len = sizeof(uint32_t); 3528 job->reply_len = sizeof(uint32_t);
3526 job->reply->reply_data.ctels_reply.status = 3529 bsg_reply->reply_data.ctels_reply.status =
3527 FC_CTELS_STATUS_REJECT; 3530 FC_CTELS_STATUS_REJECT;
3528 } 3531 }
3529 3532
@@ -3549,20 +3552,23 @@ out_free_mem:
3549 kfree(bsg_fcpt); 3552 kfree(bsg_fcpt);
3550 kfree(drv_fcxp); 3553 kfree(drv_fcxp);
3551out: 3554out:
3552 job->reply->result = rc; 3555 bsg_reply->result = rc;
3553 3556
3554 if (rc == BFA_STATUS_OK) 3557 if (rc == BFA_STATUS_OK)
3555 job->job_done(job); 3558 bsg_job_done(job, bsg_reply->result,
3559 bsg_reply->reply_payload_rcv_len);
3556 3560
3557 return rc; 3561 return rc;
3558} 3562}
3559 3563
3560int 3564int
3561bfad_im_bsg_request(struct fc_bsg_job *job) 3565bfad_im_bsg_request(struct bsg_job *job)
3562{ 3566{
3567 struct fc_bsg_request *bsg_request = job->request;
3568 struct fc_bsg_reply *bsg_reply = job->reply;
3563 uint32_t rc = BFA_STATUS_OK; 3569 uint32_t rc = BFA_STATUS_OK;
3564 3570
3565 switch (job->request->msgcode) { 3571 switch (bsg_request->msgcode) {
3566 case FC_BSG_HST_VENDOR: 3572 case FC_BSG_HST_VENDOR:
3567 /* Process BSG HST Vendor requests */ 3573 /* Process BSG HST Vendor requests */
3568 rc = bfad_im_bsg_vendor_request(job); 3574 rc = bfad_im_bsg_vendor_request(job);
@@ -3575,8 +3581,8 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
3575 rc = bfad_im_bsg_els_ct_request(job); 3581 rc = bfad_im_bsg_els_ct_request(job);
3576 break; 3582 break;
3577 default: 3583 default:
3578 job->reply->result = rc = -EINVAL; 3584 bsg_reply->result = rc = -EINVAL;
3579 job->reply->reply_payload_rcv_len = 0; 3585 bsg_reply->reply_payload_rcv_len = 0;
3580 break; 3586 break;
3581 } 3587 }
3582 3588
@@ -3584,7 +3590,7 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
3584} 3590}
3585 3591
3586int 3592int
3587bfad_im_bsg_timeout(struct fc_bsg_job *job) 3593bfad_im_bsg_timeout(struct bsg_job *job)
3588{ 3594{
3589 /* Don't complete the BSG job request - return -EAGAIN 3595 /* Don't complete the BSG job request - return -EAGAIN
3590 * to reset bsg job timeout : for ELS/CT pass thru we 3596 * to reset bsg job timeout : for ELS/CT pass thru we
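
The bfad_bsg.c hunks are part of the tree-wide migration from fc_bsg_job to the generic bsg_job: handlers now take struct bsg_job, pull the FC-specific request/reply out of job->request and job->reply (note that bsg_request must be initialized from job->request before its first dereference, e.g. for msgcode), recover the Scsi_Host with fc_bsg_to_shost() plus shost_priv(), and signal completion through bsg_job_done() instead of job->job_done(). A hedged skeleton of a converted handler, using only the accessors visible in these hunks (struct example_port is a hypothetical driver private; error handling elided):

    #include <linux/bsg-lib.h>
    #include <scsi/scsi_transport_fc.h>

    struct example_port { int dummy; };   /* hypothetical */

    static int example_bsg_vendor_request(struct bsg_job *job)
    {
        struct fc_bsg_request *bsg_request = job->request;
        struct fc_bsg_reply *bsg_reply = job->reply;
        struct example_port *port = shost_priv(fc_bsg_to_shost(job));
        int rc = 0;

        /* ... act on bsg_request->rqst_data.h_vendor, using port ... */

        bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
        bsg_reply->result = rc;
        bsg_job_done(job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len); /* was job->job_done(job) */
        return rc;
    }
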
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 836fdc221edd..c81ec2a77ef5 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -166,8 +166,8 @@ extern struct device_attribute *bfad_im_vport_attrs[];
166 166
167irqreturn_t bfad_intx(int irq, void *dev_id); 167irqreturn_t bfad_intx(int irq, void *dev_id);
168 168
169int bfad_im_bsg_request(struct fc_bsg_job *job); 169int bfad_im_bsg_request(struct bsg_job *job);
170int bfad_im_bsg_timeout(struct fc_bsg_job *job); 170int bfad_im_bsg_timeout(struct bsg_job *job);
171 171
172/* 172/*
173 * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the 173 * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f9ddb6156f14..0990130821fa 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -970,7 +970,6 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
970 sizeof(struct libfc_function_template)); 970 sizeof(struct libfc_function_template));
971 fc_elsct_init(lport); 971 fc_elsct_init(lport);
972 fc_exch_init(lport); 972 fc_exch_init(lport);
973 fc_rport_init(lport);
974 fc_disc_init(lport); 973 fc_disc_init(lport);
975 fc_disc_config(lport, lport); 974 fc_disc_config(lport, lport);
976 return 0; 975 return 0;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 08ec318afb99..739bfb62aff6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -80,7 +80,6 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
80 struct bnx2fc_rport *tgt, 80 struct bnx2fc_rport *tgt,
81 struct fc_rport_priv *rdata) 81 struct fc_rport_priv *rdata)
82{ 82{
83 struct fc_lport *lport = rdata->local_port;
84 struct fc_rport *rport = rdata->rport; 83 struct fc_rport *rport = rdata->rport;
85 struct bnx2fc_interface *interface = port->priv; 84 struct bnx2fc_interface *interface = port->priv;
86 struct bnx2fc_hba *hba = interface->hba; 85 struct bnx2fc_hba *hba = interface->hba;
@@ -160,7 +159,7 @@ ofld_err:
160tgt_init_err: 159tgt_init_err:
161 if (tgt->fcoe_conn_id != -1) 160 if (tgt->fcoe_conn_id != -1)
162 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); 161 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
163 lport->tt.rport_logoff(rdata); 162 fc_rport_logoff(rdata);
164} 163}
165 164
166void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) 165void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
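
The two bnx2fc hunks reflect libfc housekeeping from the same series: fc_rport_init() is handled inside libfc now, so the driver's call goes away, and rport logoff is an exported function rather than an entry in the per-lport template, which also removes the need for the lport local. The calling-convention change in miniature:

    struct fc_rport_priv;                        /* opaque; see libfc headers */
    void fc_rport_logoff(struct fc_rport_priv *rdata);

    static void offload_error_cleanup(struct fc_rport_priv *rdata)
    {
        /* before: rdata->local_port->tt.rport_logoff(rdata); */
        fc_rport_logoff(rdata);
    }
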
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 4655a9f9dcea..9e6f647ff1c1 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1411,7 +1411,7 @@ static int init_act_open(struct cxgbi_sock *csk)
1411 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); 1411 csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
1412 if (csk->atid < 0) { 1412 if (csk->atid < 0) {
1413 pr_err("%s, NO atid available.\n", ndev->name); 1413 pr_err("%s, NO atid available.\n", ndev->name);
1414 return -EINVAL; 1414 goto rel_resource_without_clip;
1415 } 1415 }
1416 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); 1416 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1417 cxgbi_sock_get(csk); 1417 cxgbi_sock_get(csk);
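
In cxgb4i, failing to allocate an atid used to return -EINVAL directly, skipping the function's unwind path and leaking whatever init_act_open() had already acquired; judging by the label name, rel_resource_without_clip releases those earlier resources while skipping the CLIP-entry teardown. A stand-alone model of the goto-unwind idiom the fix restores:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int init_connection(int fail_atid)
    {
        int rc = 0;
        void *held = malloc(64);    /* acquired before the atid */

        if (!held)
            return -ENOMEM;         /* nothing held yet: direct return is fine */

        if (fail_atid) {
            rc = -EINVAL;
            goto rel_resource;      /* the old 'return -EINVAL' skipped this */
        }

    rel_resource:
        free(held);
        return rc;
    }

    int main(void)
    {
        printf("rc=%d\n", init_connection(1));
        return 0;
    }
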
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6e6815545a71..0e9de5d62da2 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -19,6 +19,7 @@
19#include <linux/rwsem.h> 19#include <linux/rwsem.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <scsi/scsi.h> 21#include <scsi/scsi.h>
22#include <scsi/scsi_cmnd.h>
22#include <scsi/scsi_device.h> 23#include <scsi/scsi_device.h>
23 24
24extern const struct file_operations cxlflash_cxl_fops; 25extern const struct file_operations cxlflash_cxl_fops;
@@ -62,11 +63,6 @@ static inline void check_sizes(void)
62/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */ 63/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
63#define CMD_BUFSIZE SIZE_4K 64#define CMD_BUFSIZE SIZE_4K
64 65
65/* flags in IOA status area for host use */
66#define B_DONE 0x01
67#define B_ERROR 0x02 /* set with B_DONE */
68#define B_TIMEOUT 0x04 /* set with B_DONE & B_ERROR */
69
70enum cxlflash_lr_state { 66enum cxlflash_lr_state {
71 LINK_RESET_INVALID, 67 LINK_RESET_INVALID,
72 LINK_RESET_REQUIRED, 68 LINK_RESET_REQUIRED,
@@ -132,12 +128,9 @@ struct cxlflash_cfg {
132struct afu_cmd { 128struct afu_cmd {
133 struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */ 129 struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
134 struct sisl_ioasa sa; /* IOASA must follow IOARCB */ 130 struct sisl_ioasa sa; /* IOASA must follow IOARCB */
135 spinlock_t slock;
136 struct completion cevent;
137 char *buf; /* per command buffer */
138 struct afu *parent; 131 struct afu *parent;
139 int slot; 132 struct scsi_cmnd *scp;
140 atomic_t free; 133 struct completion cevent;
141 134
142 u8 cmd_tmf:1; 135 u8 cmd_tmf:1;
143 136
@@ -147,19 +140,31 @@ struct afu_cmd {
147 */ 140 */
148} __aligned(cache_line_size()); 141} __aligned(cache_line_size());
149 142
143static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
144{
145 return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
146}
147
148static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
149{
150 struct afu_cmd *afuc = sc_to_afuc(sc);
151
152 memset(afuc, 0, sizeof(*afuc));
153 return afuc;
154}
155
150struct afu { 156struct afu {
151 /* Stuff requiring alignment go first. */ 157 /* Stuff requiring alignment go first. */
152 158
153 u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */ 159 u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
154 /*
155 * Command & data for AFU commands.
156 */
157 struct afu_cmd cmd[CXLFLASH_NUM_CMDS];
158 160
159 /* Beware of alignment till here. Preferably introduce new 161 /* Beware of alignment till here. Preferably introduce new
160 * fields after this point 162 * fields after this point
161 */ 163 */
162 164
165 int (*send_cmd)(struct afu *, struct afu_cmd *);
166 void (*context_reset)(struct afu_cmd *);
167
163 /* AFU HW */ 168 /* AFU HW */
164 struct cxl_ioctl_start_work work; 169 struct cxl_ioctl_start_work work;
165 struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */ 170 struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
@@ -173,10 +178,10 @@ struct afu {
173 u64 *hrrq_end; 178 u64 *hrrq_end;
174 u64 *hrrq_curr; 179 u64 *hrrq_curr;
175 bool toggle; 180 bool toggle;
176 bool read_room; 181 atomic_t cmds_active; /* Number of currently active AFU commands */
177 atomic64_t room; 182 s64 room;
183 spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
178 u64 hb; 184 u64 hb;
179 u32 cmd_couts; /* Number of command checkouts */
180 u32 internal_lun; /* User-desired LUN mode for this AFU */ 185 u32 internal_lun; /* User-desired LUN mode for this AFU */
181 186
182 char version[16]; 187 char version[16];
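
The common.h changes relocate per-command state from a fixed array inside struct afu into the midlayer's per-command private area: sc_to_afuc() maps a scsi_cmnd to its struct afu_cmd via scsi_cmd_priv(), and because that area only guarantees pointer alignment, the result is rounded up with PTR_ALIGN to the command's own alignment (the host template must therefore reserve sizeof(struct afu_cmd) plus the alignment slack; that reservation lives outside this excerpt). The arithmetic, stand-alone:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ALIGN_TO 128u   /* illustrative; the driver aligns to a cache line */
    #define PTR_ALIGN(p, a) \
        ((void *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

    int main(void)
    {
        void *priv = malloc(256 + ALIGN_TO - 1);   /* over-size by a-1 */
        void *cmd;

        if (!priv)
            return 1;
        cmd = PTR_ALIGN(priv, ALIGN_TO);
        printf("priv=%p cmd=%p offset=%zu\n", priv, cmd,
               (size_t)((uintptr_t)cmd - (uintptr_t)priv));
        free(priv);
        return 0;
    }
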
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index a0923cade6f3..6c318db90c85 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -254,8 +254,14 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
254 if (lli->parent->mode != MODE_NONE) 254 if (lli->parent->mode != MODE_NONE)
255 rc = -EBUSY; 255 rc = -EBUSY;
256 else { 256 else {
257 /*
258 * Clean up local LUN for this port and reset table
259 * tracking when no more references exist.
260 */
257 sdev->hostdata = NULL; 261 sdev->hostdata = NULL;
258 lli->port_sel &= ~CHAN2PORT(chan); 262 lli->port_sel &= ~CHAN2PORT(chan);
263 if (lli->port_sel == 0U)
264 lli->in_table = false;
259 } 265 }
260 } 266 }
261 267
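
The lunmgt.c fix completes the bookkeeping for LUN removal: port_sel acts as a per-port reference mask, and once the last port bit clears, the entry must also drop out of the lookup table (in_table), which was previously left stale. The logic in miniature, with the CHAN2PORT bit encoding assumed from the macro name:

    #include <stdbool.h>
    #include <stdio.h>

    #define CHAN2PORT(c) (1u << (c))    /* assumed encoding */

    int main(void)
    {
        unsigned int port_sel = CHAN2PORT(0) | CHAN2PORT(1);
        bool in_table = true;

        port_sel &= ~CHAN2PORT(0);
        if (port_sel == 0u)
            in_table = false;   /* not yet: port 1 remains */

        port_sel &= ~CHAN2PORT(1);
        if (port_sel == 0u)
            in_table = false;   /* last reference: clear it */

        printf("in_table=%d\n", in_table);
        return 0;
    }
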
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b301655f91cd..b17ebf6d0a7e 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -35,67 +35,6 @@ MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
35MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
36 36
37/** 37/**
38 * cmd_checkout() - checks out an AFU command
39 * @afu: AFU to checkout from.
40 *
41 * Commands are checked out in a round-robin fashion. Note that since
42 * the command pool is larger than the hardware queue, the majority of
43 * times we will only loop once or twice before getting a command. The
44 * buffer and CDB within the command are initialized (zeroed) prior to
45 * returning.
46 *
47 * Return: The checked out command or NULL when command pool is empty.
48 */
49static struct afu_cmd *cmd_checkout(struct afu *afu)
50{
51 int k, dec = CXLFLASH_NUM_CMDS;
52 struct afu_cmd *cmd;
53
54 while (dec--) {
55 k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
56
57 cmd = &afu->cmd[k];
58
59 if (!atomic_dec_if_positive(&cmd->free)) {
60 pr_devel("%s: returning found index=%d cmd=%p\n",
61 __func__, cmd->slot, cmd);
62 memset(cmd->buf, 0, CMD_BUFSIZE);
63 memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
64 return cmd;
65 }
66 }
67
68 return NULL;
69}
70
71/**
72 * cmd_checkin() - checks in an AFU command
73 * @cmd: AFU command to checkin.
74 *
75 * Safe to pass commands that have already been checked in. Several
76 * internal tracking fields are reset as part of the checkin. Note
77 * that these are intentionally reset prior to toggling the free bit
78 * to avoid clobbering values in the event that the command is checked
79 * out right away.
80 */
81static void cmd_checkin(struct afu_cmd *cmd)
82{
83 cmd->rcb.scp = NULL;
84 cmd->rcb.timeout = 0;
85 cmd->sa.ioasc = 0;
86 cmd->cmd_tmf = false;
87 cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
88
89 if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
90 pr_err("%s: Freeing cmd (%d) that is not in use!\n",
91 __func__, cmd->slot);
92 return;
93 }
94
95 pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
96}
97
98/**
99 * process_cmd_err() - command error handler 38 * process_cmd_err() - command error handler
100 * @cmd: AFU command that experienced the error. 39 * @cmd: AFU command that experienced the error.
101 * @scp: SCSI command associated with the AFU command in error. 40 * @scp: SCSI command associated with the AFU command in error.
@@ -212,7 +151,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
212 * 151 *
213 * Prepares and submits command that has either completed or timed out to 152 * Prepares and submits command that has either completed or timed out to
214 * the SCSI stack. Checks AFU command back into command pool for non-internal 153 * the SCSI stack. Checks AFU command back into command pool for non-internal
215 * (rcb.scp populated) commands. 154 * (cmd->scp populated) commands.
216 */ 155 */
217static void cmd_complete(struct afu_cmd *cmd) 156static void cmd_complete(struct afu_cmd *cmd)
218{ 157{
@@ -222,19 +161,14 @@ static void cmd_complete(struct afu_cmd *cmd)
222 struct cxlflash_cfg *cfg = afu->parent; 161 struct cxlflash_cfg *cfg = afu->parent;
223 bool cmd_is_tmf; 162 bool cmd_is_tmf;
224 163
225 spin_lock_irqsave(&cmd->slock, lock_flags); 164 if (cmd->scp) {
226 cmd->sa.host_use_b[0] |= B_DONE; 165 scp = cmd->scp;
227 spin_unlock_irqrestore(&cmd->slock, lock_flags);
228
229 if (cmd->rcb.scp) {
230 scp = cmd->rcb.scp;
231 if (unlikely(cmd->sa.ioasc)) 166 if (unlikely(cmd->sa.ioasc))
232 process_cmd_err(cmd, scp); 167 process_cmd_err(cmd, scp);
233 else 168 else
234 scp->result = (DID_OK << 16); 169 scp->result = (DID_OK << 16);
235 170
236 cmd_is_tmf = cmd->cmd_tmf; 171 cmd_is_tmf = cmd->cmd_tmf;
237 cmd_checkin(cmd); /* Don't use cmd after here */
238 172
239 pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X " 173 pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
240 "ioasc=%d\n", __func__, scp, scp->result, 174 "ioasc=%d\n", __func__, scp, scp->result,
@@ -254,49 +188,19 @@ static void cmd_complete(struct afu_cmd *cmd)
254} 188}
255 189
256/** 190/**
257 * context_reset() - timeout handler for AFU commands 191 * context_reset_ioarrin() - reset command owner context via IOARRIN register
258 * @cmd: AFU command that timed out. 192 * @cmd: AFU command that timed out.
259 *
260 * Sends a reset to the AFU.
261 */ 193 */
262static void context_reset(struct afu_cmd *cmd) 194static void context_reset_ioarrin(struct afu_cmd *cmd)
263{ 195{
264 int nretry = 0; 196 int nretry = 0;
265 u64 rrin = 0x1; 197 u64 rrin = 0x1;
266 u64 room = 0;
267 struct afu *afu = cmd->parent; 198 struct afu *afu = cmd->parent;
268 ulong lock_flags; 199 struct cxlflash_cfg *cfg = afu->parent;
200 struct device *dev = &cfg->dev->dev;
269 201
270 pr_debug("%s: cmd=%p\n", __func__, cmd); 202 pr_debug("%s: cmd=%p\n", __func__, cmd);
271 203
272 spin_lock_irqsave(&cmd->slock, lock_flags);
273
274 /* Already completed? */
275 if (cmd->sa.host_use_b[0] & B_DONE) {
276 spin_unlock_irqrestore(&cmd->slock, lock_flags);
277 return;
278 }
279
280 cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
281 spin_unlock_irqrestore(&cmd->slock, lock_flags);
282
283 /*
284 * We really want to send this reset at all costs, so spread
285 * out wait time on successive retries for available room.
286 */
287 do {
288 room = readq_be(&afu->host_map->cmd_room);
289 atomic64_set(&afu->room, room);
290 if (room)
291 goto write_rrin;
292 udelay(1 << nretry);
293 } while (nretry++ < MC_ROOM_RETRY_CNT);
294
295 pr_err("%s: no cmd_room to send reset\n", __func__);
296 return;
297
298write_rrin:
299 nretry = 0;
300 writeq_be(rrin, &afu->host_map->ioarrin); 204 writeq_be(rrin, &afu->host_map->ioarrin);
301 do { 205 do {
302 rrin = readq_be(&afu->host_map->ioarrin); 206 rrin = readq_be(&afu->host_map->ioarrin);
@@ -305,93 +209,81 @@ write_rrin:
305 /* Double delay each time */ 209 /* Double delay each time */
306 udelay(1 << nretry); 210 udelay(1 << nretry);
307 } while (nretry++ < MC_ROOM_RETRY_CNT); 211 } while (nretry++ < MC_ROOM_RETRY_CNT);
212
213 dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
214 __func__, rrin, nretry);
308} 215}
309 216
310/** 217/**
311 * send_cmd() - sends an AFU command 218 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
312 * @afu: AFU associated with the host. 219 * @afu: AFU associated with the host.
313 * @cmd: AFU command to send. 220 * @cmd: AFU command to send.
314 * 221 *
315 * Return: 222 * Return:
316 * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure 223 * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
317 */ 224 */
318static int send_cmd(struct afu *afu, struct afu_cmd *cmd) 225static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
319{ 226{
320 struct cxlflash_cfg *cfg = afu->parent; 227 struct cxlflash_cfg *cfg = afu->parent;
321 struct device *dev = &cfg->dev->dev; 228 struct device *dev = &cfg->dev->dev;
322 int nretry = 0;
323 int rc = 0; 229 int rc = 0;
324 u64 room; 230 s64 room;
325 long newval; 231 ulong lock_flags;
326 232
327 /* 233 /*
328 * This routine is used by critical users such an AFU sync and to 234 * To avoid the performance penalty of MMIO, spread the update of
329 * send a task management function (TMF). Thus we want to retry a 235 * 'room' over multiple commands.
330 * bit before returning an error. To avoid the performance penalty
331 * of MMIO, we spread the update of 'room' over multiple commands.
332 */ 236 */
333retry: 237 spin_lock_irqsave(&afu->rrin_slock, lock_flags);
334 newval = atomic64_dec_if_positive(&afu->room); 238 if (--afu->room < 0) {
335 if (!newval) { 239 room = readq_be(&afu->host_map->cmd_room);
336 do { 240 if (room <= 0) {
337 room = readq_be(&afu->host_map->cmd_room); 241 dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
338 atomic64_set(&afu->room, room); 242 "0x%02X, room=0x%016llX\n",
339 if (room) 243 __func__, cmd->rcb.cdb[0], room);
340 goto write_ioarrin; 244 afu->room = 0;
341 udelay(1 << nretry); 245 rc = SCSI_MLQUEUE_HOST_BUSY;
342 } while (nretry++ < MC_ROOM_RETRY_CNT); 246 goto out;
343
344 dev_err(dev, "%s: no cmd_room to send 0x%X\n",
345 __func__, cmd->rcb.cdb[0]);
346
347 goto no_room;
348 } else if (unlikely(newval < 0)) {
349 /* This should be rare. i.e. Only if two threads race and
350 * decrement before the MMIO read is done. In this case
351 * just benefit from the other thread having updated
352 * afu->room.
353 */
354 if (nretry++ < MC_ROOM_RETRY_CNT) {
355 udelay(1 << nretry);
356 goto retry;
357 } 247 }
358 248 afu->room = room - 1;
359 goto no_room;
360 } 249 }
361 250
362write_ioarrin:
363 writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin); 251 writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
364out: 252out:
253 spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
365 pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd, 254 pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
366 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc); 255 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
367 return rc; 256 return rc;
368
369no_room:
370 afu->read_room = true;
371 kref_get(&cfg->afu->mapcount);
372 schedule_work(&cfg->work_q);
373 rc = SCSI_MLQUEUE_HOST_BUSY;
374 goto out;
375} 257}
376 258
377/** 259/**
378 * wait_resp() - polls for a response or timeout to a sent AFU command 260 * wait_resp() - polls for a response or timeout to a sent AFU command
379 * @afu: AFU associated with the host. 261 * @afu: AFU associated with the host.
380 * @cmd: AFU command that was sent. 262 * @cmd: AFU command that was sent.
263 *
264 * Return:
265 * 0 on success, -1 on timeout/error
381 */ 266 */
382static void wait_resp(struct afu *afu, struct afu_cmd *cmd) 267static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
383{ 268{
269 int rc = 0;
384 ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000); 270 ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
385 271
386 timeout = wait_for_completion_timeout(&cmd->cevent, timeout); 272 timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
387 if (!timeout) 273 if (!timeout) {
388 context_reset(cmd); 274 afu->context_reset(cmd);
275 rc = -1;
276 }
389 277
390 if (unlikely(cmd->sa.ioasc != 0)) 278 if (unlikely(cmd->sa.ioasc != 0)) {
391 pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, " 279 pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
392 "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0], 280 "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
393 cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc, 281 cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
394 cmd->sa.rc.fc_rc); 282 cmd->sa.rc.fc_rc);
283 rc = -1;
284 }
285
286 return rc;
395} 287}
396 288
397/** 289/**
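
send_cmd_ioarrin() replaces the lock-free atomic64 room counter with a plain s64 cache guarded by rrin_slock: the count is decremented per submission and refreshed from the cmd_room MMIO register only when it underruns, and exhaustion now fails fast with SCSI_MLQUEUE_HOST_BUSY instead of scheduling recovery work. wait_resp() likewise starts returning a status (-1 on timeout, after kicking afu->context_reset(), or on a bad ioasc) so callers can propagate the failure. The credit caching, modeled stand-alone (the spinlock is elided; one hw_read_room() call refills many submissions):

    #include <stdio.h>

    static long long cached_room;
    static int refills;

    static long long hw_read_room(void)    /* ~readq_be(&cmd_room) */
    {
        return refills++ ? 0 : 16;         /* 16 credits, then none */
    }

    static int submit_one(void)
    {
        if (--cached_room < 0) {
            long long room = hw_read_room();
            if (room <= 0) {
                cached_room = 0;
                return -1;    /* SCSI_MLQUEUE_HOST_BUSY */
            }
            cached_room = room - 1;
        }
        /* ... write the IOARCB address to IOARRIN ... */
        return 0;
    }

    int main(void)
    {
        int i, busy = 0;

        for (i = 0; i < 40; i++)
            busy += (submit_one() != 0);
        printf("accepted=%d busy=%d\n", 40 - busy, busy);
        return 0;
    }
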
@@ -405,24 +297,15 @@ static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
405 */ 297 */
406static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) 298static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
407{ 299{
408 struct afu_cmd *cmd;
409
410 u32 port_sel = scp->device->channel + 1; 300 u32 port_sel = scp->device->channel + 1;
411 short lflag = 0;
412 struct Scsi_Host *host = scp->device->host; 301 struct Scsi_Host *host = scp->device->host;
413 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; 302 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
303 struct afu_cmd *cmd = sc_to_afucz(scp);
414 struct device *dev = &cfg->dev->dev; 304 struct device *dev = &cfg->dev->dev;
415 ulong lock_flags; 305 ulong lock_flags;
416 int rc = 0; 306 int rc = 0;
417 ulong to; 307 ulong to;
418 308
419 cmd = cmd_checkout(afu);
420 if (unlikely(!cmd)) {
421 dev_err(dev, "%s: could not get a free command\n", __func__);
422 rc = SCSI_MLQUEUE_HOST_BUSY;
423 goto out;
424 }
425
426 /* When Task Management Function is active do not send another */ 309 /* When Task Management Function is active do not send another */
427 spin_lock_irqsave(&cfg->tmf_slock, lock_flags); 310 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
428 if (cfg->tmf_active) 311 if (cfg->tmf_active)
@@ -430,28 +313,23 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
430 !cfg->tmf_active, 313 !cfg->tmf_active,
431 cfg->tmf_slock); 314 cfg->tmf_slock);
432 cfg->tmf_active = true; 315 cfg->tmf_active = true;
433 cmd->cmd_tmf = true;
434 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); 316 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
435 317
318 cmd->scp = scp;
319 cmd->parent = afu;
320 cmd->cmd_tmf = true;
321
436 cmd->rcb.ctx_id = afu->ctx_hndl; 322 cmd->rcb.ctx_id = afu->ctx_hndl;
323 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
437 cmd->rcb.port_sel = port_sel; 324 cmd->rcb.port_sel = port_sel;
438 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); 325 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
439
440 lflag = SISL_REQ_FLAGS_TMF_CMD;
441
442 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | 326 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
443 SISL_REQ_FLAGS_SUP_UNDERRUN | lflag); 327 SISL_REQ_FLAGS_SUP_UNDERRUN |
444 328 SISL_REQ_FLAGS_TMF_CMD);
445 /* Stash the scp in the reserved field, for reuse during interrupt */
446 cmd->rcb.scp = scp;
447
448 /* Copy the CDB from the cmd passed in */
449 memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd)); 329 memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
450 330
451 /* Send the command */ 331 rc = afu->send_cmd(afu, cmd);
452 rc = send_cmd(afu, cmd);
453 if (unlikely(rc)) { 332 if (unlikely(rc)) {
454 cmd_checkin(cmd);
455 spin_lock_irqsave(&cfg->tmf_slock, lock_flags); 333 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
456 cfg->tmf_active = false; 334 cfg->tmf_active = false;
457 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); 335 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
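
send_tmf() now takes its command from the SCSI command's private area (sc_to_afucz() hands back a zeroed struct) and submits through the new afu->send_cmd hook rather than calling send_cmd() directly; init_afu() below wires that hook, together with afu->context_reset, to the IOARRIN implementations. The dispatch pattern, stand-alone:

    #include <stdio.h>

    struct afu;
    struct cmd { int id; };

    struct afu {    /* queuing model hidden behind per-AFU hooks */
        int (*send_cmd)(struct afu *, struct cmd *);
    };

    static int send_cmd_ioarrin(struct afu *afu, struct cmd *cmd)
    {
        printf("cmd %d via IOARRIN\n", cmd->id);
        return 0;
    }

    int main(void)
    {
        struct afu afu = { .send_cmd = send_cmd_ioarrin };
        struct cmd c = { .id = 1 };

        return afu.send_cmd(&afu, &c);   /* callers never name the backend */
    }
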
@@ -507,12 +385,12 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
507 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; 385 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
508 struct afu *afu = cfg->afu; 386 struct afu *afu = cfg->afu;
509 struct device *dev = &cfg->dev->dev; 387 struct device *dev = &cfg->dev->dev;
510 struct afu_cmd *cmd; 388 struct afu_cmd *cmd = sc_to_afucz(scp);
389 struct scatterlist *sg = scsi_sglist(scp);
511 u32 port_sel = scp->device->channel + 1; 390 u32 port_sel = scp->device->channel + 1;
512 int nseg, i, ncount; 391 u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
513 struct scatterlist *sg;
514 ulong lock_flags; 392 ulong lock_flags;
515 short lflag = 0; 393 int nseg = 0;
516 int rc = 0; 394 int rc = 0;
517 int kref_got = 0; 395 int kref_got = 0;
518 396
@@ -552,55 +430,38 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
552 break; 430 break;
553 } 431 }
554 432
555 cmd = cmd_checkout(afu);
556 if (unlikely(!cmd)) {
557 dev_err(dev, "%s: could not get a free command\n", __func__);
558 rc = SCSI_MLQUEUE_HOST_BUSY;
559 goto out;
560 }
561
562 kref_get(&cfg->afu->mapcount); 433 kref_get(&cfg->afu->mapcount);
563 kref_got = 1; 434 kref_got = 1;
564 435
436 if (likely(sg)) {
437 nseg = scsi_dma_map(scp);
438 if (unlikely(nseg < 0)) {
439 dev_err(dev, "%s: Fail DMA map!\n", __func__);
440 rc = SCSI_MLQUEUE_HOST_BUSY;
441 goto out;
442 }
443
444 cmd->rcb.data_len = sg_dma_len(sg);
445 cmd->rcb.data_ea = sg_dma_address(sg);
446 }
447
448 cmd->scp = scp;
449 cmd->parent = afu;
450
565 cmd->rcb.ctx_id = afu->ctx_hndl; 451 cmd->rcb.ctx_id = afu->ctx_hndl;
452 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
566 cmd->rcb.port_sel = port_sel; 453 cmd->rcb.port_sel = port_sel;
567 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); 454 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
568 455
569 if (scp->sc_data_direction == DMA_TO_DEVICE) 456 if (scp->sc_data_direction == DMA_TO_DEVICE)
570 lflag = SISL_REQ_FLAGS_HOST_WRITE; 457 req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
571 else
572 lflag = SISL_REQ_FLAGS_HOST_READ;
573
574 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
575 SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
576
577 /* Stash the scp in the reserved field, for reuse during interrupt */
578 cmd->rcb.scp = scp;
579
580 nseg = scsi_dma_map(scp);
581 if (unlikely(nseg < 0)) {
582 dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
583 __func__, nseg);
584 rc = SCSI_MLQUEUE_HOST_BUSY;
585 goto out;
586 }
587 458
588 ncount = scsi_sg_count(scp); 459 cmd->rcb.req_flags = req_flags;
589 scsi_for_each_sg(scp, sg, ncount, i) {
590 cmd->rcb.data_len = sg_dma_len(sg);
591 cmd->rcb.data_ea = sg_dma_address(sg);
592 }
593
594 /* Copy the CDB from the scsi_cmnd passed in */
595 memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb)); 460 memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
596 461
597 /* Send the command */ 462 rc = afu->send_cmd(afu, cmd);
598 rc = send_cmd(afu, cmd); 463 if (unlikely(rc))
599 if (unlikely(rc)) {
600 cmd_checkin(cmd);
601 scsi_dma_unmap(scp); 464 scsi_dma_unmap(scp);
602 }
603
604out: 465out:
605 if (kref_got) 466 if (kref_got)
606 kref_put(&afu->mapcount, afu_unmap); 467 kref_put(&afu->mapcount, afu_unmap);
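
cxlflash_queuecommand() follows suit: the command comes from sc_to_afucz(), DMA mapping happens only when a scatterlist exists, and the IOARCB is programmed from the first scatterlist element alone. That last step is presumably safe because the old scsi_for_each_sg() loop overwrote data_len/data_ea on every pass anyway, so only one element ever reached the hardware; the host template evidently restricts commands to a single-element list. The old loop's behavior, stand-alone:

    #include <stdio.h>

    struct sg { unsigned long addr; unsigned int len; };

    int main(void)
    {
        struct sg list[3] = {
            { 0x1000, 512 }, { 0x2000, 512 }, { 0x3000, 512 },
        };
        unsigned long ea = 0;
        unsigned int dlen = 0;
        int i;

        for (i = 0; i < 3; i++) {   /* the removed pattern */
            dlen = list[i].len;     /* each pass clobbers... */
            ea = list[i].addr;      /* ...the previous entry */
        }
        printf("loop kept only: ea=%#lx len=%u\n", ea, dlen);
        return 0;
    }
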
@@ -628,17 +489,9 @@ static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
628 */ 489 */
629static void free_mem(struct cxlflash_cfg *cfg) 490static void free_mem(struct cxlflash_cfg *cfg)
630{ 491{
631 int i;
632 char *buf = NULL;
633 struct afu *afu = cfg->afu; 492 struct afu *afu = cfg->afu;
634 493
635 if (cfg->afu) { 494 if (cfg->afu) {
636 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
637 buf = afu->cmd[i].buf;
638 if (!((u64)buf & (PAGE_SIZE - 1)))
639 free_page((ulong)buf);
640 }
641
642 free_pages((ulong)afu, get_order(sizeof(struct afu))); 495 free_pages((ulong)afu, get_order(sizeof(struct afu)));
643 cfg->afu = NULL; 496 cfg->afu = NULL;
644 } 497 }
@@ -650,30 +503,16 @@ static void free_mem(struct cxlflash_cfg *cfg)
650 * 503 *
651 * Safe to call with AFU in a partially allocated/initialized state. 504 * Safe to call with AFU in a partially allocated/initialized state.
652 * 505 *
653 * Cleans up all state associated with the command queue, and unmaps 506 * Waits for any active internal AFU commands to time out and then unmaps
654 * the MMIO space. 507 * the MMIO space.
655 *
656 * - complete() will take care of commands we initiated (they'll be checked
657 * in as part of the cleanup that occurs after the completion)
658 *
659 * - cmd_checkin() will take care of entries that we did not initiate and that
660 * have not (and will not) complete because they are sitting on a [now stale]
661 * hardware queue
662 */ 508 */
663static void stop_afu(struct cxlflash_cfg *cfg) 509static void stop_afu(struct cxlflash_cfg *cfg)
664{ 510{
665 int i;
666 struct afu *afu = cfg->afu; 511 struct afu *afu = cfg->afu;
667 struct afu_cmd *cmd;
668 512
669 if (likely(afu)) { 513 if (likely(afu)) {
670 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { 514 while (atomic_read(&afu->cmds_active))
671 cmd = &afu->cmd[i]; 515 ssleep(1);
672 complete(&cmd->cevent);
673 if (!atomic_read(&cmd->free))
674 cmd_checkin(cmd);
675 }
676
677 if (likely(afu->afu_map)) { 516 if (likely(afu->afu_map)) {
678 cxl_psa_unmap((void __iomem *)afu->afu_map); 517 cxl_psa_unmap((void __iomem *)afu->afu_map);
679 afu->afu_map = NULL; 518 afu->afu_map = NULL;
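
With the private command pool gone, stop_afu() no longer completes and checks in stale pool entries; it simply waits for afu->cmds_active, the counter bumped around internal AFU commands, to drain before unmapping MMIO. The drain in miniature (C11 atomics stand in for the kernel's atomic_t; the driver sleeps with ssleep(1) per poll):

    #include <stdatomic.h>

    static atomic_int cmds_active;

    void run_internal_cmd(void)
    {
        atomic_fetch_add(&cmds_active, 1);
        /* ... issue the command, wait_resp() ... */
        atomic_fetch_sub(&cmds_active, 1);
    }

    void stop_afu_sketch(void)
    {
        while (atomic_load(&cmds_active))
            ;   /* driver: ssleep(1) until stragglers time out */
        /* no internal command can touch the MMIO map now: unmap it */
    }
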
@@ -886,8 +725,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
886static int alloc_mem(struct cxlflash_cfg *cfg) 725static int alloc_mem(struct cxlflash_cfg *cfg)
887{ 726{
888 int rc = 0; 727 int rc = 0;
889 int i;
890 char *buf = NULL;
891 struct device *dev = &cfg->dev->dev; 728 struct device *dev = &cfg->dev->dev;
892 729
893 /* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */ 730 /* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
@@ -901,25 +738,6 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
901 } 738 }
902 cfg->afu->parent = cfg; 739 cfg->afu->parent = cfg;
903 cfg->afu->afu_map = NULL; 740 cfg->afu->afu_map = NULL;
904
905 for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
906 if (!((u64)buf & (PAGE_SIZE - 1))) {
907 buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
908 if (unlikely(!buf)) {
909 dev_err(dev,
910 "%s: Allocate command buffers fail!\n",
911 __func__);
912 rc = -ENOMEM;
913 free_mem(cfg);
914 goto out;
915 }
916 }
917
918 cfg->afu->cmd[i].buf = buf;
919 atomic_set(&cfg->afu->cmd[i].free, 1);
920 cfg->afu->cmd[i].slot = i;
921 }
922
923out: 741out:
924 return rc; 742 return rc;
925} 743}
@@ -1549,13 +1367,6 @@ static void init_pcr(struct cxlflash_cfg *cfg)
1549 1367
1550 /* Program the Endian Control for the master context */ 1368 /* Program the Endian Control for the master context */
1551 writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl); 1369 writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
1552
1553 /* Initialize cmd fields that never change */
1554 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1555 afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
1556 afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
1557 afu->cmd[i].rcb.rrq = 0x0;
1558 }
1559} 1370}
1560 1371
1561/** 1372/**
@@ -1644,19 +1455,8 @@ out:
1644static int start_afu(struct cxlflash_cfg *cfg) 1455static int start_afu(struct cxlflash_cfg *cfg)
1645{ 1456{
1646 struct afu *afu = cfg->afu; 1457 struct afu *afu = cfg->afu;
1647 struct afu_cmd *cmd;
1648
1649 int i = 0;
1650 int rc = 0; 1458 int rc = 0;
1651 1459
1652 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1653 cmd = &afu->cmd[i];
1654
1655 init_completion(&cmd->cevent);
1656 spin_lock_init(&cmd->slock);
1657 cmd->parent = afu;
1658 }
1659
1660 init_pcr(cfg); 1460 init_pcr(cfg);
1661 1461
1662 /* After an AFU reset, RRQ entries are stale, clear them */ 1462 /* After an AFU reset, RRQ entries are stale, clear them */
@@ -1829,6 +1629,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
1829 goto err2; 1629 goto err2;
1830 } 1630 }
1831 1631
1632 afu->send_cmd = send_cmd_ioarrin;
1633 afu->context_reset = context_reset_ioarrin;
1634
1832 pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__, 1635 pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
1833 afu->version, afu->interface_version); 1636 afu->version, afu->interface_version);
1834 1637
@@ -1840,7 +1643,8 @@ static int init_afu(struct cxlflash_cfg *cfg)
 	}
 
 	afu_err_intr_init(cfg->afu);
-	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
+	spin_lock_init(&afu->rrin_slock);
+	afu->room = readq_be(&afu->host_map->cmd_room);
 
 	/* Restore the LUN mappings */
 	cxlflash_restore_luntable(cfg);
@@ -1884,8 +1688,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
 	struct cxlflash_cfg *cfg = afu->parent;
 	struct device *dev = &cfg->dev->dev;
 	struct afu_cmd *cmd = NULL;
+	char *buf = NULL;
 	int rc = 0;
-	int retry_cnt = 0;
 	static DEFINE_MUTEX(sync_active);
 
 	if (cfg->state != STATE_NORMAL) {
@@ -1894,27 +1698,23 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
 	}
 
 	mutex_lock(&sync_active);
-retry:
-	cmd = cmd_checkout(afu);
-	if (unlikely(!cmd)) {
-		retry_cnt++;
-		udelay(1000 * retry_cnt);
-		if (retry_cnt < MC_RETRY_CNT)
-			goto retry;
-		dev_err(dev, "%s: could not get a free command\n", __func__);
+	atomic_inc(&afu->cmds_active);
+	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+	if (unlikely(!buf)) {
+		dev_err(dev, "%s: no memory for command\n", __func__);
 		rc = -1;
 		goto out;
 	}
 
-	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
+	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+	init_completion(&cmd->cevent);
+	cmd->parent = afu;
 
-	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
+	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
 
 	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
-	cmd->rcb.port_sel = 0x0;	/* NA */
-	cmd->rcb.lun_id = 0x0;	/* NA */
-	cmd->rcb.data_len = 0x0;
-	cmd->rcb.data_ea = 0x0;
+	cmd->rcb.ctx_id = afu->ctx_hndl;
+	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
 	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
 
 	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
@@ -1924,20 +1724,17 @@ retry:
 	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
 	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
 
-	rc = send_cmd(afu, cmd);
+	rc = afu->send_cmd(afu, cmd);
 	if (unlikely(rc))
 		goto out;
 
-	wait_resp(afu, cmd);
-
-	/* Set on timeout */
-	if (unlikely((cmd->sa.ioasc != 0) ||
-		     (cmd->sa.host_use_b[0] & B_ERROR)))
+	rc = wait_resp(afu, cmd);
+	if (unlikely(rc))
 		rc = -1;
 out:
+	atomic_dec(&afu->cmds_active);
 	mutex_unlock(&sync_active);
-	if (cmd)
-		cmd_checkin(cmd);
+	kfree(buf);
 	pr_debug("%s: returning rc=%d\n", __func__, rc);
 	return rc;
 }
@@ -2376,8 +2173,9 @@ static struct scsi_host_template driver_template = {
 	.change_queue_depth = cxlflash_change_queue_depth,
 	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
 	.can_queue = CXLFLASH_MAX_CMDS,
+	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
 	.this_id = -1,
-	.sg_tablesize = SG_NONE,	/* No scatter gather support */
+	.sg_tablesize = 1,	/* No scatter gather support */
 	.max_sectors = CXLFLASH_MAX_SECTORS,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = cxlflash_host_attrs,
@@ -2412,7 +2210,6 @@ MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
  * Handles the following events:
  * - Link reset which cannot be performed on interrupt context due to
  *   blocking up to a few seconds
- * - Read AFU command room
  * - Rescan the host
  */
 static void cxlflash_worker_thread(struct work_struct *work)
@@ -2449,11 +2246,6 @@ static void cxlflash_worker_thread(struct work_struct *work)
 		cfg->lr_state = LINK_RESET_COMPLETE;
 	}
 
-	if (afu->read_room) {
-		atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
-		afu->read_room = false;
-	}
-
 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
 
 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
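
[Editor's note] The cxlflash hunks above drop the driver's private command pool in favor of per-call allocation; because struct afu_cmd carries an alignment requirement, cxlflash_afu_sync() now over-allocates with kzalloc() and aligns the result with PTR_ALIGN(), keeping the raw pointer for kfree(). A minimal sketch of that pattern follows; struct my_cmd and my_cmd_alloc() are hypothetical stand-ins, not cxlflash symbols.

	/* Over-allocate, align, and keep the raw pointer for kfree() (sketch). */
	struct my_cmd {
		u64 payload[8];
	} __aligned(128);

	static struct my_cmd *my_cmd_alloc(char **raw)
	{
		char *buf;

		/* Pad by alignment - 1 so an aligned object always fits. */
		buf = kzalloc(sizeof(struct my_cmd) +
			      __alignof__(struct my_cmd) - 1, GFP_KERNEL);
		if (!buf)
			return NULL;
		*raw = buf;	/* caller must kfree(*raw), not the aligned pointer */
		return (struct my_cmd *)PTR_ALIGN(buf, __alignof__(struct my_cmd));
	}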
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 347fc1671975..1a2d09c148b3 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -72,7 +72,7 @@ struct sisl_ioarcb {
 	u16 timeout;	/* in units specified by req_flags */
 	u32 rsvd1;
 	u8 cdb[16];	/* must be in big endian */
-	struct scsi_cmnd *scp;
+	u64 reserved;	/* Reserved area */
 } __packed;
 
 struct sisl_rc {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index db03c49e2350..d704752b6332 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -95,7 +95,7 @@ struct alua_port_group {
 
 struct alua_dh_data {
 	struct list_head	node;
-	struct alua_port_group	*pg;
+	struct alua_port_group __rcu *pg;
 	int			group_id;
 	spinlock_t		pg_lock;
 	struct scsi_device	*sdev;
@@ -371,7 +371,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
 
 	/* Check for existing port group references */
 	spin_lock(&h->pg_lock);
-	old_pg = h->pg;
+	old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
 	if (old_pg != pg) {
 		/* port group has changed. Update to new port group */
 		if (h->pg) {
@@ -390,7 +390,9 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
 		list_add_rcu(&h->node, &pg->dh_list);
 		spin_unlock_irqrestore(&pg->lock, flags);
 
-	alua_rtpg_queue(h->pg, sdev, NULL, true);
+	alua_rtpg_queue(rcu_dereference_protected(h->pg,
+						  lockdep_is_held(&h->pg_lock)),
+			sdev, NULL, true);
 	spin_unlock(&h->pg_lock);
 
 	if (old_pg)
@@ -942,7 +944,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
 static int alua_set_params(struct scsi_device *sdev, const char *params)
 {
 	struct alua_dh_data *h = sdev->handler_data;
-	struct alua_port_group __rcu *pg = NULL;
+	struct alua_port_group *pg = NULL;
 	unsigned int optimize = 0, argc;
 	const char *p = params;
 	int result = SCSI_DH_OK;
@@ -989,7 +991,7 @@ static int alua_activate(struct scsi_device *sdev,
 	struct alua_dh_data *h = sdev->handler_data;
 	int err = SCSI_DH_OK;
 	struct alua_queue_data *qdata;
-	struct alua_port_group __rcu *pg;
+	struct alua_port_group *pg;
 
 	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
 	if (!qdata) {
@@ -1053,7 +1055,7 @@ static void alua_check(struct scsi_device *sdev, bool force)
 static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
 {
 	struct alua_dh_data *h = sdev->handler_data;
-	struct alua_port_group __rcu *pg;
+	struct alua_port_group *pg;
 	unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
 	int ret = BLKPREP_OK;
 
@@ -1123,7 +1125,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
 	struct alua_port_group *pg;
 
 	spin_lock(&h->pg_lock);
-	pg = h->pg;
+	pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
 	rcu_assign_pointer(h->pg, NULL);
 	h->sdev = NULL;
 	spin_unlock(&h->pg_lock);
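
[Editor's note] The scsi_dh_alua fixes above are about RCU annotation discipline: a pointer traversed by RCU readers is declared __rcu, writers publish it with rcu_assign_pointer(), and any lock-protected read goes through rcu_dereference_protected() with the matching lockdep condition so sparse and lockdep can both verify the usage. A reduced sketch of the write side, with hypothetical struct names:

	/* Writer-side update of an __rcu pointer under a spinlock (sketch). */
	struct obj {
		struct rcu_head rcu;
	};

	struct state {
		spinlock_t lock;
		struct obj __rcu *cur;
	};

	static void state_replace(struct state *s, struct obj *new)
	{
		struct obj *old;

		spin_lock(&s->lock);
		old = rcu_dereference_protected(s->cur, lockdep_is_held(&s->lock));
		rcu_assign_pointer(s->cur, new);
		spin_unlock(&s->lock);
		if (old)
			kfree_rcu(old, rcu);	/* defer the free past all readers */
	}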
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 9b5a457d4bca..6af3394d051d 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -34,13 +34,13 @@
  * Definitions for the generic 5380 driver.
  */
 
-#define NCR5380_read(reg)		inb(instance->io_port + reg)
-#define NCR5380_write(reg, value)	outb(value, instance->io_port + reg)
+#define NCR5380_read(reg)		inb(hostdata->base + (reg))
+#define NCR5380_write(reg, value)	outb(value, hostdata->base + (reg))
 
-#define NCR5380_dma_xfer_len(instance, cmd, phase)	(0)
-#define NCR5380_dma_recv_setup(instance, dst, len)	(0)
-#define NCR5380_dma_send_setup(instance, src, len)	(0)
-#define NCR5380_dma_residual(instance)			(0)
+#define NCR5380_dma_xfer_len		NCR5380_dma_xfer_none
+#define NCR5380_dma_recv_setup		NCR5380_dma_setup_none
+#define NCR5380_dma_send_setup		NCR5380_dma_setup_none
+#define NCR5380_dma_residual		NCR5380_dma_residual_none
 
 #define NCR5380_implementation_fields	/* none */
 
@@ -71,6 +71,7 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
 				   const struct pci_device_id *id)
 {
 	struct Scsi_Host *shost;
+	struct NCR5380_hostdata *hostdata;
 	unsigned long io;
 	int error = -ENODEV;
 
@@ -88,7 +89,9 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
 				 sizeof(struct NCR5380_hostdata));
 	if (!shost)
 		goto out_release_region;
-	shost->io_port = io;
+
+	hostdata = shost_priv(shost);
+	hostdata->base = io;
 
 	/* This card does not seem to raise an interrupt on pdev->irq.
 	 * Steam-powered SCSI controllers run without an IRQ anyway.
@@ -125,7 +128,8 @@ out_host_put:
 static void dmx3191d_remove_one(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	unsigned long io = shost->io_port;
+	struct NCR5380_hostdata *hostdata = shost_priv(shost);
+	unsigned long io = hostdata->base;
 
 	scsi_remove_host(shost);
 
@@ -149,18 +153,7 @@ static struct pci_driver dmx3191d_pci_driver = {
 	.remove		= dmx3191d_remove_one,
 };
 
-static int __init dmx3191d_init(void)
-{
-	return pci_register_driver(&dmx3191d_pci_driver);
-}
-
-static void __exit dmx3191d_exit(void)
-{
-	pci_unregister_driver(&dmx3191d_pci_driver);
-}
-
-module_init(dmx3191d_init);
-module_exit(dmx3191d_exit);
+module_pci_driver(dmx3191d_pci_driver);
 
 MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
 MODULE_DESCRIPTION("Domex DMX3191D SCSI driver");
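
[Editor's note] The dmx3191d cleanup above replaces hand-rolled init/exit functions with module_pci_driver(), which expands to exactly that boilerplate when the module does nothing beyond registering a single pci_driver. Roughly, for a hypothetical foo_driver:

	/* What module_pci_driver(foo_driver) stands in for, approximately: */
	static int __init foo_init(void)
	{
		return pci_register_driver(&foo_driver);
	}
	module_init(foo_init);

	static void __exit foo_exit(void)
	{
		pci_unregister_driver(&foo_driver);
	}
	module_exit(foo_exit);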
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 21c8d210c456..27c0dce22e72 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -651,7 +651,6 @@ static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
 	}
 	spin_unlock_irqrestore(pHba->host->host_lock, flags);
 	if (i >= nr) {
-		kfree (reply);
 		printk(KERN_WARNING"%s: Too many outstanding "
 				"ioctl commands\n", pHba->name);
 		return (u32)-1;
@@ -1754,8 +1753,10 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 	sg_offset = (msg[0]>>4)&0xf;
 	msg[2] = 0x40000000; // IOCTL context
 	msg[3] = adpt_ioctl_to_context(pHba, reply);
-	if (msg[3] == (u32)-1)
+	if (msg[3] == (u32)-1) {
+		kfree(reply);
 		return -EBUSY;
+	}
 
 	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
 	if(sg_offset) {
@@ -3350,7 +3351,7 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
 	if (opblk_va == NULL) {
 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
 			resblk_va, resblk_pa);
-		printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
+		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
 			pHba->name);
 		return -ENOMEM;
 	}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9bd41a35a78a..59150cad0353 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -63,6 +63,14 @@ unsigned int fcoe_debug_logging;
 module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
 
+unsigned int fcoe_e_d_tov = 2 * 1000;
+module_param_named(e_d_tov, fcoe_e_d_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000");
+
+unsigned int fcoe_r_a_tov = 2 * 2 * 1000;
+module_param_named(r_a_tov, fcoe_r_a_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000");
+
 static DEFINE_MUTEX(fcoe_config_mutex);
 
 static struct workqueue_struct *fcoe_wq;
@@ -582,7 +590,8 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	 * Use default VLAN for FIP VLAN discovery protocol
 	 */
 	frame = (struct fip_frame *)skb->data;
-	if (frame->fip.fip_op == ntohs(FIP_OP_VLAN) &&
+	if (ntohs(frame->eth.h_proto) == ETH_P_FIP &&
+	    ntohs(frame->fip.fip_op) == FIP_OP_VLAN &&
 	    fcoe->realdev != fcoe->netdev)
 		skb->dev = fcoe->realdev;
 	else
@@ -633,8 +642,8 @@ static int fcoe_lport_config(struct fc_lport *lport)
 	lport->qfull = 0;
 	lport->max_retry_count = 3;
 	lport->max_rport_retry_count = 3;
-	lport->e_d_tov = 2 * 1000;	/* FC-FS default */
-	lport->r_a_tov = 2 * 2 * 1000;
+	lport->e_d_tov = fcoe_e_d_tov;
+	lport->r_a_tov = fcoe_r_a_tov;
 	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
 				 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
 	lport->does_npiv = 1;
@@ -2160,11 +2169,13 @@ static bool fcoe_match(struct net_device *netdev)
  */
 static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 {
+	int ctlr_prio = TC_PRIO_BESTEFFORT;
+	int fcoe_prio = TC_PRIO_INTERACTIVE;
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 #ifdef CONFIG_DCB
 	int dcbx;
 	u8 fup, up;
 	struct net_device *netdev = fcoe->realdev;
-	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 	struct dcb_app app = {
 				.priority = 0,
 				.protocol = ETH_P_FCOE
@@ -2186,10 +2197,12 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 			fup = dcb_getapp(netdev, &app);
 		}
 
-		fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
-		ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
+		fcoe_prio = ffs(up) ? ffs(up) - 1 : 0;
+		ctlr_prio = ffs(fup) ? ffs(fup) - 1 : fcoe_prio;
 	}
 #endif
+	fcoe->priority = fcoe_prio;
+	ctlr->priority = ctlr_prio;
 }
 
 enum fcoe_create_link_state {
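
[Editor's note] The fcoe.c hunks above turn the hard-coded E_D_TOV/R_A_TOV defaults into module parameters read at lport configuration time. The general shape, as a sketch with a hypothetical my_timeout_ms knob:

	/* Expose a tunable default as a module parameter (sketch). */
	static unsigned int my_timeout_ms = 2000;
	module_param_named(timeout_ms, my_timeout_ms, uint, S_IRUGO | S_IWUSR);
	MODULE_PARM_DESC(timeout_ms, "timeout in ms, default 2000");

The value can then be set at load time (timeout_ms=4000 to modprobe) and inspected or, with S_IWUSR, changed under /sys/module/.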
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index dcf36537a767..cea57e27e713 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -801,6 +801,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
 	return -EINPROGRESS;
 drop:
 	kfree_skb(skb);
+	LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
+			op, ntoh24(fh->fh_d_id));
 	return -EINVAL;
 }
 EXPORT_SYMBOL(fcoe_ctlr_els_send);
@@ -1316,7 +1318,7 @@ drop:
  * The overall length has already been checked.
  */
 static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
-				     struct fip_header *fh)
+				     struct sk_buff *skb)
 {
 	struct fip_desc *desc;
 	struct fip_mac_desc *mp;
@@ -1331,14 +1333,18 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 	int num_vlink_desc;
 	int reset_phys_port = 0;
 	struct fip_vn_desc **vlink_desc_arr = NULL;
+	struct fip_header *fh = (struct fip_header *)skb->data;
+	struct ethhdr *eh = eth_hdr(skb);
 
 	LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
 
-	if (!fcf || !lport->port_id) {
+	if (!fcf) {
 		/*
 		 * We are yet to select best FCF, but we got CVL in the
 		 * meantime. reset the ctlr and let it rediscover the FCF
 		 */
+		LIBFCOE_FIP_DBG(fip, "Resetting fcoe_ctlr as FCF has not been "
+				"selected yet\n");
 		mutex_lock(&fip->ctlr_mutex);
 		fcoe_ctlr_reset(fip);
 		mutex_unlock(&fip->ctlr_mutex);
@@ -1346,6 +1352,31 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 	}
 
 	/*
+	 * If we've selected an FCF check that the CVL is from there to avoid
+	 * processing CVLs from an unexpected source. If it is from an
+	 * unexpected source drop it on the floor.
+	 */
+	if (!ether_addr_equal(eh->h_source, fcf->fcf_mac)) {
+		LIBFCOE_FIP_DBG(fip, "Dropping CVL due to source address "
+				"mismatch with FCF src=%pM\n", eh->h_source);
+		return;
+	}
+
+	/*
+	 * If we haven't logged into the fabric but receive a CVL we should
+	 * reset everything and go back to solicitation.
+	 */
+	if (!lport->port_id) {
+		LIBFCOE_FIP_DBG(fip, "lport not logged in, resoliciting\n");
+		mutex_lock(&fip->ctlr_mutex);
+		fcoe_ctlr_reset(fip);
+		mutex_unlock(&fip->ctlr_mutex);
+		fc_lport_reset(fip->lp);
+		fcoe_ctlr_solicit(fip, NULL);
+		return;
+	}
+
+	/*
 	 * mask of required descriptors.  Validating each one clears its bit.
 	 */
 	desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
@@ -1576,7 +1607,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
 		fcoe_ctlr_recv_adv(fip, skb);
 	else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
-		fcoe_ctlr_recv_clr_vlink(fip, fiph);
+		fcoe_ctlr_recv_clr_vlink(fip, skb);
 	kfree_skb(skb);
 	return 0;
 drop:
@@ -2122,7 +2153,7 @@ static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport,
 			LIBFCOE_FIP_DBG(fip,
 					"rport FLOGI limited port_id %6.6x\n",
 					rdata->ids.port_id);
-			lport->tt.rport_logoff(rdata);
+			fc_rport_logoff(rdata);
 		}
 		break;
 	default:
@@ -2145,9 +2176,15 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
 {
 	struct fc_rport_priv *rdata;
 
+	rcu_read_lock();
+	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+		if (kref_get_unless_zero(&rdata->kref)) {
+			fc_rport_logoff(rdata);
+			kref_put(&rdata->kref, fc_rport_destroy);
+		}
+	}
+	rcu_read_unlock();
 	mutex_lock(&lport->disc.disc_mutex);
-	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
-		lport->tt.rport_logoff(rdata);
 	lport->disc.disc_callback = NULL;
 	mutex_unlock(&lport->disc.disc_mutex);
 }
@@ -2178,7 +2215,7 @@ static void fcoe_ctlr_disc_stop(struct fc_lport *lport)
 static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport)
 {
 	fcoe_ctlr_disc_stop(lport);
-	lport->tt.rport_flush_queue();
+	fc_rport_flush_queue();
 	synchronize_rcu();
 }
 
@@ -2393,6 +2430,8 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
 	switch (fip->state) {
 	case FIP_ST_VNMP_CLAIM:
 	case FIP_ST_VNMP_UP:
+		LIBFCOE_FIP_DBG(fip, "vn_probe_req: send reply, state %x\n",
+				fip->state);
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
 				  frport->enode_mac, 0);
 		break;
@@ -2407,15 +2446,21 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
 		 */
 		if (fip->lp->wwpn > rdata->ids.port_name &&
 		    !(frport->flags & FIP_FL_REC_OR_P2P)) {
+			LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+					"port_id collision\n");
 			fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
 					  frport->enode_mac, 0);
 			break;
 		}
 		/* fall through */
 	case FIP_ST_VNMP_START:
+		LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+				"restart VN2VN negotiation\n");
 		fcoe_ctlr_vn_restart(fip);
 		break;
 	default:
+		LIBFCOE_FIP_DBG(fip, "vn_probe_req: ignore state %x\n",
+				fip->state);
 		break;
 	}
 }
@@ -2437,9 +2482,12 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
 	case FIP_ST_VNMP_PROBE1:
 	case FIP_ST_VNMP_PROBE2:
 	case FIP_ST_VNMP_CLAIM:
+		LIBFCOE_FIP_DBG(fip, "vn_probe_reply: restart state %x\n",
+				fip->state);
 		fcoe_ctlr_vn_restart(fip);
 		break;
 	case FIP_ST_VNMP_UP:
+		LIBFCOE_FIP_DBG(fip, "vn_probe_reply: send claim notify\n");
 		fcoe_ctlr_vn_send_claim(fip);
 		break;
 	default:
@@ -2467,26 +2515,33 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
 		return;
 
 	mutex_lock(&lport->disc.disc_mutex);
-	rdata = lport->tt.rport_create(lport, port_id);
+	rdata = fc_rport_create(lport, port_id);
 	if (!rdata) {
 		mutex_unlock(&lport->disc.disc_mutex);
 		return;
 	}
+	mutex_lock(&rdata->rp_mutex);
+	mutex_unlock(&lport->disc.disc_mutex);
 
 	rdata->ops = &fcoe_ctlr_vn_rport_ops;
 	rdata->disc_id = lport->disc.disc_id;
 
 	ids = &rdata->ids;
 	if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
-	    (ids->node_name != -1 && ids->node_name != new->ids.node_name))
-		lport->tt.rport_logoff(rdata);
+	    (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+		mutex_unlock(&rdata->rp_mutex);
+		LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
+		fc_rport_logoff(rdata);
+		mutex_lock(&rdata->rp_mutex);
+	}
 	ids->port_name = new->ids.port_name;
 	ids->node_name = new->ids.node_name;
-	mutex_unlock(&lport->disc.disc_mutex);
+	mutex_unlock(&rdata->rp_mutex);
 
 	frport = fcoe_ctlr_rport(rdata);
-	LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n",
-			port_id, frport->fcoe_len ? "old" : "new");
+	LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
+			port_id, frport->fcoe_len ? "old" : "new",
+			rdata->rp_state);
 	*frport = *fcoe_ctlr_rport(new);
 	frport->time = 0;
 }
@@ -2506,12 +2561,12 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
 	struct fcoe_rport *frport;
 	int ret = -1;
 
-	rdata = lport->tt.rport_lookup(lport, port_id);
+	rdata = fc_rport_lookup(lport, port_id);
 	if (rdata) {
 		frport = fcoe_ctlr_rport(rdata);
 		memcpy(mac, frport->enode_mac, ETH_ALEN);
 		ret = 0;
-		kref_put(&rdata->kref, lport->tt.rport_destroy);
+		kref_put(&rdata->kref, fc_rport_destroy);
 	}
 	return ret;
 }
@@ -2529,6 +2584,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
 	struct fcoe_rport *frport = fcoe_ctlr_rport(new);
 
 	if (frport->flags & FIP_FL_REC_OR_P2P) {
+		LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
 		return;
 	}
@@ -2536,25 +2592,37 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
 	case FIP_ST_VNMP_START:
 	case FIP_ST_VNMP_PROBE1:
 	case FIP_ST_VNMP_PROBE2:
-		if (new->ids.port_id == fip->port_id)
+		if (new->ids.port_id == fip->port_id) {
+			LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+					"restart, state %d\n",
+					fip->state);
 			fcoe_ctlr_vn_restart(fip);
+		}
 		break;
 	case FIP_ST_VNMP_CLAIM:
 	case FIP_ST_VNMP_UP:
 		if (new->ids.port_id == fip->port_id) {
 			if (new->ids.port_name > fip->lp->wwpn) {
+				LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+						"restart, port_id collision\n");
 				fcoe_ctlr_vn_restart(fip);
 				break;
 			}
+			LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+					"send claim notify\n");
 			fcoe_ctlr_vn_send_claim(fip);
 			break;
 		}
+		LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
+				new->ids.port_id);
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
 				  min((u32)frport->fcoe_len,
 				      fcoe_ctlr_fcoe_size(fip)));
 		fcoe_ctlr_vn_add(fip, new);
 		break;
 	default:
+		LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+				"ignoring claim from %x\n", new->ids.port_id);
 		break;
 	}
 }
@@ -2591,19 +2659,26 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
 
 	frport = fcoe_ctlr_rport(new);
 	if (frport->flags & FIP_FL_REC_OR_P2P) {
+		LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
 		return;
 	}
-	rdata = lport->tt.rport_lookup(lport, new->ids.port_id);
+	rdata = fc_rport_lookup(lport, new->ids.port_id);
 	if (rdata) {
 		if (rdata->ids.node_name == new->ids.node_name &&
 		    rdata->ids.port_name == new->ids.port_name) {
 			frport = fcoe_ctlr_rport(rdata);
-			if (!frport->time && fip->state == FIP_ST_VNMP_UP)
-				lport->tt.rport_login(rdata);
+			LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
+					rdata->ids.port_id);
+			if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
+				LIBFCOE_FIP_DBG(fip, "beacon expired "
+						"for rport %x\n",
+						rdata->ids.port_id);
+				fc_rport_login(rdata);
+			}
 			frport->time = jiffies;
 		}
-		kref_put(&rdata->kref, lport->tt.rport_destroy);
+		kref_put(&rdata->kref, fc_rport_destroy);
 		return;
 	}
 	if (fip->state != FIP_ST_VNMP_UP)
@@ -2638,11 +2713,15 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
 	unsigned long deadline;
 
 	next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
-	mutex_lock(&lport->disc.disc_mutex);
+	rcu_read_lock();
 	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+		if (!kref_get_unless_zero(&rdata->kref))
+			continue;
 		frport = fcoe_ctlr_rport(rdata);
-		if (!frport->time)
+		if (!frport->time) {
+			kref_put(&rdata->kref, fc_rport_destroy);
 			continue;
+		}
 		deadline = frport->time +
 			msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10);
 		if (time_after_eq(jiffies, deadline)) {
@@ -2650,11 +2729,12 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
 			LIBFCOE_FIP_DBG(fip,
 				"port %16.16llx fc_id %6.6x beacon expired\n",
 				rdata->ids.port_name, rdata->ids.port_id);
-			lport->tt.rport_logoff(rdata);
+			fc_rport_logoff(rdata);
 		} else if (time_before(deadline, next_time))
 			next_time = deadline;
+		kref_put(&rdata->kref, fc_rport_destroy);
 	}
-	mutex_unlock(&lport->disc.disc_mutex);
+	rcu_read_unlock();
 	return next_time;
 }
 
@@ -2674,11 +2754,21 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 		struct fc_rport_priv rdata;
 		struct fcoe_rport frport;
 	} buf;
-	int rc;
+	int rc, vlan_id = 0;
 
 	fiph = (struct fip_header *)skb->data;
 	sub = fiph->fip_subcode;
 
+	if (fip->lp->vlan)
+		vlan_id = skb_vlan_tag_get_id(skb);
+
+	if (vlan_id && vlan_id != fip->lp->vlan) {
+		LIBFCOE_FIP_DBG(fip, "vn_recv drop frame sub %x vlan %d\n",
+				sub, vlan_id);
+		rc = -EAGAIN;
+		goto drop;
+	}
+
 	rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
 	if (rc) {
 		LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
@@ -2941,7 +3031,7 @@ static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp)
 
 	rjt_data.reason = ELS_RJT_UNSUP;
 	rjt_data.explan = ELS_EXPL_NONE;
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+	fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
 	fc_frame_free(fp);
 }
 
@@ -2991,12 +3081,17 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
 	mutex_lock(&disc->disc_mutex);
 	callback = disc->pending ? disc->disc_callback : NULL;
 	disc->pending = 0;
+	mutex_unlock(&disc->disc_mutex);
+	rcu_read_lock();
 	list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+		if (!kref_get_unless_zero(&rdata->kref))
+			continue;
 		frport = fcoe_ctlr_rport(rdata);
 		if (frport->time)
-			lport->tt.rport_login(rdata);
+			fc_rport_login(rdata);
+		kref_put(&rdata->kref, fc_rport_destroy);
 	}
-	mutex_unlock(&disc->disc_mutex);
+	rcu_read_unlock();
 	if (callback)
 		callback(lport, DISC_EV_SUCCESS);
 }
@@ -3015,11 +3110,13 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
 	switch (fip->state) {
 	case FIP_ST_VNMP_START:
 		fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1);
+		LIBFCOE_FIP_DBG(fip, "vn_timeout: send 1st probe request\n");
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
 		next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT);
 		break;
 	case FIP_ST_VNMP_PROBE1:
 		fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2);
+		LIBFCOE_FIP_DBG(fip, "vn_timeout: send 2nd probe request\n");
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
 		next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
 		break;
@@ -3030,6 +3127,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
 		hton24(mac + 3, new_port_id);
 		fcoe_ctlr_map_dest(fip);
 		fip->update_mac(fip->lp, mac);
+		LIBFCOE_FIP_DBG(fip, "vn_timeout: send claim notify\n");
 		fcoe_ctlr_vn_send_claim(fip);
 		next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
 		break;
@@ -3041,6 +3139,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
 		next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT);
 		if (time_after_eq(jiffies, next_time)) {
 			fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP);
+			LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
 			fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
 					  fcoe_all_vn2vn, 0);
 			next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
@@ -3051,6 +3150,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
 	case FIP_ST_VNMP_UP:
 		next_time = fcoe_ctlr_vn_age(fip);
 		if (time_after_eq(jiffies, fip->port_ka_time)) {
+			LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
 			fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
 					  fcoe_all_vn2vn, 0);
 			fip->port_ka_time = jiffies +
@@ -3135,7 +3235,6 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
 	fc_exch_init(lport);
 	fc_elsct_init(lport);
 	fc_lport_init(lport);
-	fc_rport_init(lport);
 	fc_disc_init(lport);
 	fcoe_ctlr_mode_set(lport, fip, fip->mode);
 	return 0;
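
[Editor's note] Several fcoe_ctlr hunks above replace disc_mutex-protected rport walks with lockless RCU iteration; each entry is pinned with kref_get_unless_zero() before use so a concurrent final kref_put() cannot free it mid-walk, and is released again before moving on. The shape of that pattern, reduced to a sketch with hypothetical item/list names:

	/* RCU list walk with per-entry reference pinning (sketch). */
	struct item {
		struct list_head peers;
		struct kref kref;
	};

	static void visit_all(struct list_head *head,
			      void (*visit)(struct item *),
			      void (*release)(struct kref *))
	{
		struct item *it;

		rcu_read_lock();
		list_for_each_entry_rcu(it, head, peers) {
			if (!kref_get_unless_zero(&it->kref))
				continue;	/* entry already being torn down */
			visit(it);
			kref_put(&it->kref, release);
		}
		rcu_read_unlock();
	}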
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 0675fd128734..9cf3d56296ab 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -335,16 +335,24 @@ static ssize_t store_ctlr_enabled(struct device *dev,
 				  const char *buf, size_t count)
 {
 	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+	bool enabled;
 	int rc;
 
+	if (*buf == '1')
+		enabled = true;
+	else if (*buf == '0')
+		enabled = false;
+	else
+		return -EINVAL;
+
 	switch (ctlr->enabled) {
 	case FCOE_CTLR_ENABLED:
-		if (*buf == '1')
+		if (enabled)
 			return count;
 		ctlr->enabled = FCOE_CTLR_DISABLED;
 		break;
 	case FCOE_CTLR_DISABLED:
-		if (*buf == '0')
+		if (!enabled)
 			return count;
 		ctlr->enabled = FCOE_CTLR_ENABLED;
 		break;
@@ -424,6 +432,75 @@ static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR,
 		       store_ctlr_fip_resp);
 
 static ssize_t
+fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count)
+{
+	int err;
+	unsigned long v;
+
+	err = kstrtoul(buf, 10, &v);
+	if (err || v > UINT_MAX)
+		return -EINVAL;
+
+	*var = v;
+
+	return count;
+}
+
+static ssize_t store_ctlr_r_a_tov(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+	if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+		return -EBUSY;
+	if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+		return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count);
+	return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_r_a_tov(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+	return sprintf(buf, "%d\n", ctlr->lp->r_a_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR,
+			show_ctlr_r_a_tov, store_ctlr_r_a_tov);
+
+static ssize_t store_ctlr_e_d_tov(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+	if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+		return -EBUSY;
+	if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+		return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count);
+	return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_e_d_tov(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+	return sprintf(buf, "%d\n", ctlr->lp->e_d_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR,
+			show_ctlr_e_d_tov, store_ctlr_e_d_tov);
+
+static ssize_t
 store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
 					 struct device_attribute *attr,
 					 const char *buf, size_t count)
@@ -507,6 +584,8 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
 static struct attribute *fcoe_ctlr_attrs[] = {
 	&device_attr_fcoe_ctlr_fip_vlan_responder.attr,
 	&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+	&device_attr_fcoe_ctlr_r_a_tov.attr,
+	&device_attr_fcoe_ctlr_e_d_tov.attr,
 	&device_attr_fcoe_ctlr_enabled.attr,
 	&device_attr_fcoe_ctlr_mode.attr,
 	NULL,
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d9fd2f841585..2544a37ece0a 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -441,30 +441,38 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 	unsigned long ptr;
 	spinlock_t *io_lock = NULL;
 	int io_lock_acquired = 0;
+	struct fc_rport_libfc_priv *rp;
 
 	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
 		return SCSI_MLQUEUE_HOST_BUSY;
 
 	rport = starget_to_rport(scsi_target(sc->device));
+	if (!rport) {
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+				"returning DID_NO_CONNECT for IO as rport is NULL\n");
+		sc->result = DID_NO_CONNECT << 16;
+		done(sc);
+		return 0;
+	}
+
 	ret = fc_remote_port_chkready(rport);
 	if (ret) {
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+				"rport is not ready\n");
 		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
 		sc->result = ret;
 		done(sc);
 		return 0;
 	}
 
-	if (rport) {
-		struct fc_rport_libfc_priv *rp = rport->dd_data;
-
-		if (!rp || rp->rp_state != RPORT_ST_READY) {
-			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+	rp = rport->dd_data;
+	if (!rp || rp->rp_state != RPORT_ST_READY) {
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 				"returning DID_NO_CONNECT for IO as rport is removed\n");
-			atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
-			sc->result = DID_NO_CONNECT<<16;
-			done(sc);
-			return 0;
-		}
+		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+		sc->result = DID_NO_CONNECT<<16;
+		done(sc);
+		return 0;
 	}
 
 	if (lp->state != LPORT_ST_READY || !(lp->link_up))
@@ -2543,7 +2551,7 @@ int fnic_reset(struct Scsi_Host *shost)
 	 * Reset local port, this will clean up libFC exchanges,
 	 * reset remote port sessions, and if link is up, begin flogi
 	 */
-	ret = lp->tt.lport_reset(lp);
+	ret = fc_lport_reset(lp);
 
 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 		      "Returning from fnic reset %s\n",
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4e15c4bf0795..5a5fa01576b7 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -613,7 +613,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
 		fc_trace_entries.rd_idx = 0;
 	}
 
-	fc_buf->time_stamp = CURRENT_TIME;
+	ktime_get_real_ts64(&fc_buf->time_stamp);
 	fc_buf->host_no = host_no;
 	fc_buf->frame_type = frame_type;
 
@@ -740,7 +740,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
 
 	len = *orig_len;
 
-	time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
+	time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
 
 	fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
 	len += snprintf(fnic_dbgfs_prt->buffer + len,
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index a8aa0578fcb0..e375d0c2eaaf 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -72,7 +72,7 @@ struct fnic_trace_data {
 typedef struct fnic_trace_data fnic_trace_data_t;
 
 struct fc_trace_hdr {
-	struct timespec time_stamp;
+	struct timespec64 time_stamp;
 	u32 host_no;
 	u8 frame_type;
 	u8 frame_len;
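
[Editor's note] The fnic_trace changes are part of the y2038 cleanup: struct timespec and the CURRENT_TIME macro are limited to a 32-bit time_t on 32-bit systems, so the trace header moves to struct timespec64 filled by ktime_get_real_ts64() and formatted with time64_to_tm(). A minimal sketch of the safe pattern:

	/* y2038-safe timestamping, as in the fnic_trace conversion (sketch). */
	static void stamp_and_print(void)
	{
		struct timespec64 ts;
		struct tm tm;

		ktime_get_real_ts64(&ts);		/* wall clock, 64-bit seconds */
		time64_to_tm(ts.tv_sec, 0, &tm);	/* broken-down UTC time */
		pr_debug("%02d:%02d:%02d.%09ld\n",
			 tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
	}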
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 9795d6f3e197..ba69d6112fa1 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -499,10 +499,7 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
 
 	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
 	if (err)
-		printk(KERN_ERR
-			"Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
-			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
-			err);
+		pr_err("Can't add addr [%pM], %d\n", addr, err);
 }
 
 void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
@@ -517,10 +514,7 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
 
 	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
 	if (err)
-		printk(KERN_ERR
-			"Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
-			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
-			err);
+		pr_err("Can't del addr [%pM], %d\n", addr, err);
 }
 
 int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
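
[Editor's note] The vnic_dev cleanups lean on the kernel's %pM printk extension, which formats a 6-byte MAC address and replaces six hand-written %02x conversions. A one-line sketch, with a hypothetical helper name:

	/* %pM prints a MAC address, e.g. "00:11:22:33:44:55" (sketch). */
	static void report_addr_error(const u8 *addr, int err)
	{
		pr_err("Can't add addr [%pM], %d\n", addr, err);
	}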
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index cbf010324c18..de5147a8c959 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -64,9 +64,9 @@ static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
 module_param_array(card, int, NULL, 0);
 MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
 
+MODULE_ALIAS("g_NCR5380_mmio");
 MODULE_LICENSE("GPL");
 
-#ifndef SCSI_G_NCR5380_MEM
 /*
  * Configure I/O address of 53C400A or DTC436 by writing magic numbers
  * to ports 0x779 and 0x379.
@@ -88,40 +88,35 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
 	cfg = 0x80 | idx | (irq << 4);
 	outb(cfg, 0x379);
 }
-#endif
+
+static unsigned int ncr_53c400a_ports[] = {
+	0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
+};
+static unsigned int dtc_3181e_ports[] = {
+	0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
+};
+static u8 ncr_53c400a_magic[] = {	/* 53C400A & DTC436 */
+	0x59, 0xb9, 0xc5, 0xae, 0xa6
+};
+static u8 hp_c2502_magic[] = {	/* HP C2502 */
+	0x0f, 0x22, 0xf0, 0x20, 0x80
+};
 
 static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 			struct device *pdev, int base, int irq, int board)
 {
-	unsigned int *ports;
+	bool is_pmio = base <= 0xffff;
+	int ret;
+	int flags = 0;
+	unsigned int *ports = NULL;
 	u8 *magic = NULL;
-#ifndef SCSI_G_NCR5380_MEM
 	int i;
 	int port_idx = -1;
 	unsigned long region_size;
-#endif
-	static unsigned int ncr_53c400a_ports[] = {
-		0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
-	};
-	static unsigned int dtc_3181e_ports[] = {
-		0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
-	};
-	static u8 ncr_53c400a_magic[] = {	/* 53C400A & DTC436 */
-		0x59, 0xb9, 0xc5, 0xae, 0xa6
-	};
-	static u8 hp_c2502_magic[] = {	/* HP C2502 */
-		0x0f, 0x22, 0xf0, 0x20, 0x80
-	};
-	int flags, ret;
 	struct Scsi_Host *instance;
 	struct NCR5380_hostdata *hostdata;
-#ifdef SCSI_G_NCR5380_MEM
-	void __iomem *iomem;
-	resource_size_t iomem_size;
-#endif
+	u8 __iomem *iomem;
 
-	ports = NULL;
-	flags = 0;
 	switch (board) {
 	case BOARD_NCR5380:
 		flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
@@ -140,8 +135,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 		break;
 	}
 
-#ifndef SCSI_G_NCR5380_MEM
-	if (ports && magic) {
+	if (is_pmio && ports && magic) {
 		/* wakeup sequence for the NCR53C400A and DTC3181E */
 
 		/* Disable the adapter and look for a free io port */
@@ -170,84 +164,89 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
170 if (ports[i]) { 164 if (ports[i]) {
171 /* At this point we have our region reserved */ 165 /* At this point we have our region reserved */
172 magic_configure(i, 0, magic); /* no IRQ yet */ 166 magic_configure(i, 0, magic); /* no IRQ yet */
173 outb(0xc0, ports[i] + 9); 167 base = ports[i];
174 if (inb(ports[i] + 9) != 0x80) { 168 outb(0xc0, base + 9);
169 if (inb(base + 9) != 0x80) {
175 ret = -ENODEV; 170 ret = -ENODEV;
176 goto out_release; 171 goto out_release;
177 } 172 }
178 base = ports[i];
179 port_idx = i; 173 port_idx = i;
180 } else 174 } else
181 return -EINVAL; 175 return -EINVAL;
182 } 176 } else if (is_pmio) {
183 else
184 {
185 /* NCR5380 - no configuration, just grab */ 177 /* NCR5380 - no configuration, just grab */
186 region_size = 8; 178 region_size = 8;
187 if (!base || !request_region(base, region_size, "ncr5380")) 179 if (!base || !request_region(base, region_size, "ncr5380"))
188 return -EBUSY; 180 return -EBUSY;
181 } else { /* MMIO */
182 region_size = NCR53C400_region_size;
183 if (!request_mem_region(base, region_size, "ncr5380"))
184 return -EBUSY;
189 } 185 }
190#else 186
191 iomem_size = NCR53C400_region_size; 187 if (is_pmio)
192 if (!request_mem_region(base, iomem_size, "ncr5380")) 188 iomem = ioport_map(base, region_size);
193 return -EBUSY; 189 else
194 iomem = ioremap(base, iomem_size); 190 iomem = ioremap(base, region_size);
191
195 if (!iomem) { 192 if (!iomem) {
196 release_mem_region(base, iomem_size); 193 ret = -ENOMEM;
197 return -ENOMEM; 194 goto out_release;
198 } 195 }
199#endif 196
200 instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata)); 197 instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
201 if (instance == NULL) { 198 if (instance == NULL) {
202 ret = -ENOMEM; 199 ret = -ENOMEM;
203 goto out_release; 200 goto out_unmap;
204 } 201 }
205 hostdata = shost_priv(instance); 202 hostdata = shost_priv(instance);
206 203
207#ifndef SCSI_G_NCR5380_MEM 204 hostdata->io = iomem;
208 instance->io_port = base; 205 hostdata->region_size = region_size;
209 instance->n_io_port = region_size; 206
210 hostdata->io_width = 1; /* 8-bit PDMA by default */ 207 if (is_pmio) {
211 208 hostdata->io_port = base;
212 /* 209 hostdata->io_width = 1; /* 8-bit PDMA by default */
213 * On NCR53C400 boards, NCR5380 registers are mapped 8 past 210 hostdata->offset = 0;
214 * the base address. 211
215 */ 212 /*
216 switch (board) { 213 * On NCR53C400 boards, NCR5380 registers are mapped 8 past
217 case BOARD_NCR53C400: 214 * the base address.
218 instance->io_port += 8; 215 */
219 hostdata->c400_ctl_status = 0; 216 switch (board) {
220 hostdata->c400_blk_cnt = 1; 217 case BOARD_NCR53C400:
221 hostdata->c400_host_buf = 4; 218 hostdata->io_port += 8;
222 break; 219 hostdata->c400_ctl_status = 0;
223 case BOARD_DTC3181E: 220 hostdata->c400_blk_cnt = 1;
224 hostdata->io_width = 2; /* 16-bit PDMA */ 221 hostdata->c400_host_buf = 4;
225 /* fall through */ 222 break;
226 case BOARD_NCR53C400A: 223 case BOARD_DTC3181E:
227 case BOARD_HP_C2502: 224 hostdata->io_width = 2; /* 16-bit PDMA */
228 hostdata->c400_ctl_status = 9; 225 /* fall through */
229 hostdata->c400_blk_cnt = 10; 226 case BOARD_NCR53C400A:
230 hostdata->c400_host_buf = 8; 227 case BOARD_HP_C2502:
231 break; 228 hostdata->c400_ctl_status = 9;
232 } 229 hostdata->c400_blk_cnt = 10;
233#else 230 hostdata->c400_host_buf = 8;
234 instance->base = base; 231 break;
235 hostdata->iomem = iomem; 232 }
236 hostdata->iomem_size = iomem_size; 233 } else {
237 switch (board) { 234 hostdata->base = base;
238 case BOARD_NCR53C400: 235 hostdata->offset = NCR53C400_mem_base;
239 hostdata->c400_ctl_status = 0x100; 236 switch (board) {
240 hostdata->c400_blk_cnt = 0x101; 237 case BOARD_NCR53C400:
241 hostdata->c400_host_buf = 0x104; 238 hostdata->c400_ctl_status = 0x100;
242 break; 239 hostdata->c400_blk_cnt = 0x101;
243 case BOARD_DTC3181E: 240 hostdata->c400_host_buf = 0x104;
244 case BOARD_NCR53C400A: 241 break;
245 case BOARD_HP_C2502: 242 case BOARD_DTC3181E:
246 pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); 243 case BOARD_NCR53C400A:
247 ret = -EINVAL; 244 case BOARD_HP_C2502:
248 goto out_unregister; 245 pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
246 ret = -EINVAL;
247 goto out_unregister;
248 }
249 } 249 }
250#endif
251 250
252 ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP); 251 ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
253 if (ret) 252 if (ret)
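
[Illustration, not part of the patch] The hunks above collapse the old compile-time SCSI_G_NCR5380_MEM split into a runtime is_pmio check: port-mapped boards are wrapped with ioport_map() so the same ioread8()/iowrite8() accessors serve both back ends. A minimal sketch of that pattern, with an illustrative helper name:

	#include <linux/io.h>
	#include <linux/ioport.h>

	/* Map either an I/O port range or a memory region behind one cookie. */
	static void __iomem *example_map(unsigned long base, unsigned long size,
					 bool is_pmio)
	{
		if (is_pmio)
			return ioport_map(base, size);	/* PIO: tagged cookie */
		return ioremap(base, size);		/* MMIO: page mapping */
	}
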
@@ -273,11 +272,9 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
273 instance->irq = NO_IRQ; 272 instance->irq = NO_IRQ;
274 273
275 if (instance->irq != NO_IRQ) { 274 if (instance->irq != NO_IRQ) {
276#ifndef SCSI_G_NCR5380_MEM
277 /* set IRQ for HP C2502 */ 275 /* set IRQ for HP C2502 */
278 if (board == BOARD_HP_C2502) 276 if (board == BOARD_HP_C2502)
279 magic_configure(port_idx, instance->irq, magic); 277 magic_configure(port_idx, instance->irq, magic);
280#endif
281 if (request_irq(instance->irq, generic_NCR5380_intr, 278 if (request_irq(instance->irq, generic_NCR5380_intr,
282 0, "NCR5380", instance)) { 279 0, "NCR5380", instance)) {
283 printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); 280 printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
@@ -303,38 +300,39 @@ out_free_irq:
303 NCR5380_exit(instance); 300 NCR5380_exit(instance);
304out_unregister: 301out_unregister:
305 scsi_host_put(instance); 302 scsi_host_put(instance);
306out_release: 303out_unmap:
307#ifndef SCSI_G_NCR5380_MEM
308 release_region(base, region_size);
309#else
310 iounmap(iomem); 304 iounmap(iomem);
311 release_mem_region(base, iomem_size); 305out_release:
312#endif 306 if (is_pmio)
307 release_region(base, region_size);
308 else
309 release_mem_region(base, region_size);
313 return ret; 310 return ret;
314} 311}
315 312
316static void generic_NCR5380_release_resources(struct Scsi_Host *instance) 313static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
317{ 314{
315 struct NCR5380_hostdata *hostdata = shost_priv(instance);
316 void __iomem *iomem = hostdata->io;
317 unsigned long io_port = hostdata->io_port;
318 unsigned long base = hostdata->base;
319 unsigned long region_size = hostdata->region_size;
320
318 scsi_remove_host(instance); 321 scsi_remove_host(instance);
319 if (instance->irq != NO_IRQ) 322 if (instance->irq != NO_IRQ)
320 free_irq(instance->irq, instance); 323 free_irq(instance->irq, instance);
321 NCR5380_exit(instance); 324 NCR5380_exit(instance);
322#ifndef SCSI_G_NCR5380_MEM
323 release_region(instance->io_port, instance->n_io_port);
324#else
325 {
326 struct NCR5380_hostdata *hostdata = shost_priv(instance);
327
328 iounmap(hostdata->iomem);
329 release_mem_region(instance->base, hostdata->iomem_size);
330 }
331#endif
332 scsi_host_put(instance); 325 scsi_host_put(instance);
326 iounmap(iomem);
327 if (io_port)
328 release_region(io_port, region_size);
329 else
330 release_mem_region(base, region_size);
333} 331}
334 332
335/** 333/**
336 * generic_NCR5380_pread - pseudo DMA read 334 * generic_NCR5380_pread - pseudo DMA read
337 * @instance: adapter to read from 335 * @hostdata: scsi host private data
338 * @dst: buffer to read into 336 * @dst: buffer to read into
339 * @len: buffer length 337 * @len: buffer length
340 * 338 *
@@ -342,10 +340,9 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
342 * controller 340 * controller
343 */ 341 */
344 342
345static inline int generic_NCR5380_pread(struct Scsi_Host *instance, 343static inline int generic_NCR5380_pread(struct NCR5380_hostdata *hostdata,
346 unsigned char *dst, int len) 344 unsigned char *dst, int len)
347{ 345{
348 struct NCR5380_hostdata *hostdata = shost_priv(instance);
349 int blocks = len / 128; 346 int blocks = len / 128;
350 int start = 0; 347 int start = 0;
351 348
@@ -361,18 +358,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
361 while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) 358 while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
362 ; /* FIXME - no timeout */ 359 ; /* FIXME - no timeout */
363 360
364#ifndef SCSI_G_NCR5380_MEM 361 if (hostdata->io_port && hostdata->io_width == 2)
365 if (hostdata->io_width == 2) 362 insw(hostdata->io_port + hostdata->c400_host_buf,
366 insw(instance->io_port + hostdata->c400_host_buf,
367 dst + start, 64); 363 dst + start, 64);
368 else 364 else if (hostdata->io_port)
369 insb(instance->io_port + hostdata->c400_host_buf, 365 insb(hostdata->io_port + hostdata->c400_host_buf,
370 dst + start, 128); 366 dst + start, 128);
371#else 367 else
372 /* implies SCSI_G_NCR5380_MEM */ 368 memcpy_fromio(dst + start,
373 memcpy_fromio(dst + start, 369 hostdata->io + NCR53C400_host_buffer, 128);
374 hostdata->iomem + NCR53C400_host_buffer, 128); 370
375#endif
376 start += 128; 371 start += 128;
377 blocks--; 372 blocks--;
378 } 373 }
@@ -381,18 +376,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
381 while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) 376 while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
382 ; /* FIXME - no timeout */ 377 ; /* FIXME - no timeout */
383 378
384#ifndef SCSI_G_NCR5380_MEM 379 if (hostdata->io_port && hostdata->io_width == 2)
385 if (hostdata->io_width == 2) 380 insw(hostdata->io_port + hostdata->c400_host_buf,
386 insw(instance->io_port + hostdata->c400_host_buf,
387 dst + start, 64); 381 dst + start, 64);
388 else 382 else if (hostdata->io_port)
389 insb(instance->io_port + hostdata->c400_host_buf, 383 insb(hostdata->io_port + hostdata->c400_host_buf,
390 dst + start, 128); 384 dst + start, 128);
391#else 385 else
392 /* implies SCSI_G_NCR5380_MEM */ 386 memcpy_fromio(dst + start,
393 memcpy_fromio(dst + start, 387 hostdata->io + NCR53C400_host_buffer, 128);
394 hostdata->iomem + NCR53C400_host_buffer, 128); 388
395#endif
396 start += 128; 389 start += 128;
397 blocks--; 390 blocks--;
398 } 391 }
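
[Illustration, not part of the patch] The repeated three-way branch above is the whole of the remaining PIO/MMIO divergence: 16-bit string I/O where the board supports it, 8-bit otherwise, and memcpy_fromio() for memory-mapped parts. Pulled out into one place it would look roughly like this (hypothetical helper; the patch deliberately keeps it inline):

	/* Drain one 128-byte block from the 53C400 host buffer. */
	static void example_read_block(struct NCR5380_hostdata *hostdata, u8 *dst)
	{
		if (hostdata->io_port && hostdata->io_width == 2)
			insw(hostdata->io_port + hostdata->c400_host_buf, dst, 64);
		else if (hostdata->io_port)
			insb(hostdata->io_port + hostdata->c400_host_buf, dst, 128);
		else
			memcpy_fromio(dst,
				      hostdata->io + NCR53C400_host_buffer, 128);
	}
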
@@ -412,7 +405,7 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
412 405
413/** 406/**
414 * generic_NCR5380_pwrite - pseudo DMA write 407 * generic_NCR5380_pwrite - pseudo DMA write
415 * @instance: adapter to read from 408 * @hostdata: scsi host private data
416 * @src: buffer to write from 409 * @src: buffer to write from
417 * @len: buffer length 410 * @len: buffer length
418 * 411 *
@@ -420,10 +413,9 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
420 * controller 413 * controller
421 */ 414 */
422 415
423static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance, 416static inline int generic_NCR5380_pwrite(struct NCR5380_hostdata *hostdata,
424 unsigned char *src, int len) 417 unsigned char *src, int len)
425{ 418{
426 struct NCR5380_hostdata *hostdata = shost_priv(instance);
427 int blocks = len / 128; 419 int blocks = len / 128;
428 int start = 0; 420 int start = 0;
429 421
@@ -439,18 +431,17 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
439 break; 431 break;
440 while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) 432 while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
441 ; // FIXME - timeout 433 ; // FIXME - timeout
442#ifndef SCSI_G_NCR5380_MEM 434
443 if (hostdata->io_width == 2) 435 if (hostdata->io_port && hostdata->io_width == 2)
444 outsw(instance->io_port + hostdata->c400_host_buf, 436 outsw(hostdata->io_port + hostdata->c400_host_buf,
445 src + start, 64); 437 src + start, 64);
446 else 438 else if (hostdata->io_port)
447 outsb(instance->io_port + hostdata->c400_host_buf, 439 outsb(hostdata->io_port + hostdata->c400_host_buf,
448 src + start, 128); 440 src + start, 128);
449#else 441 else
450 /* implies SCSI_G_NCR5380_MEM */ 442 memcpy_toio(hostdata->io + NCR53C400_host_buffer,
451 memcpy_toio(hostdata->iomem + NCR53C400_host_buffer, 443 src + start, 128);
452 src + start, 128); 444
453#endif
454 start += 128; 445 start += 128;
455 blocks--; 446 blocks--;
456 } 447 }
@@ -458,18 +449,16 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
458 while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) 449 while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
459 ; // FIXME - no timeout 450 ; // FIXME - no timeout
460 451
461#ifndef SCSI_G_NCR5380_MEM 452 if (hostdata->io_port && hostdata->io_width == 2)
462 if (hostdata->io_width == 2) 453 outsw(hostdata->io_port + hostdata->c400_host_buf,
463 outsw(instance->io_port + hostdata->c400_host_buf,
464 src + start, 64); 454 src + start, 64);
465 else 455 else if (hostdata->io_port)
466 outsb(instance->io_port + hostdata->c400_host_buf, 456 outsb(hostdata->io_port + hostdata->c400_host_buf,
467 src + start, 128); 457 src + start, 128);
468#else 458 else
469 /* implies SCSI_G_NCR5380_MEM */ 459 memcpy_toio(hostdata->io + NCR53C400_host_buffer,
470 memcpy_toio(hostdata->iomem + NCR53C400_host_buffer, 460 src + start, 128);
471 src + start, 128); 461
472#endif
473 start += 128; 462 start += 128;
474 blocks--; 463 blocks--;
475 } 464 }
@@ -489,10 +478,9 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
489 return 0; 478 return 0;
490} 479}
491 480
492static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance, 481static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
493 struct scsi_cmnd *cmd) 482 struct scsi_cmnd *cmd)
494{ 483{
495 struct NCR5380_hostdata *hostdata = shost_priv(instance);
496 int transfersize = cmd->transfersize; 484 int transfersize = cmd->transfersize;
497 485
498 if (hostdata->flags & FLAG_NO_PSEUDO_DMA) 486 if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
@@ -566,7 +554,7 @@ static struct isa_driver generic_NCR5380_isa_driver = {
566 }, 554 },
567}; 555};
568 556
569#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) 557#ifdef CONFIG_PNP
570static struct pnp_device_id generic_NCR5380_pnp_ids[] = { 558static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
571 { .id = "DTC436e", .driver_data = BOARD_DTC3181E }, 559 { .id = "DTC436e", .driver_data = BOARD_DTC3181E },
572 { .id = "" } 560 { .id = "" }
@@ -600,7 +588,7 @@ static struct pnp_driver generic_NCR5380_pnp_driver = {
600 .probe = generic_NCR5380_pnp_probe, 588 .probe = generic_NCR5380_pnp_probe,
601 .remove = generic_NCR5380_pnp_remove, 589 .remove = generic_NCR5380_pnp_remove,
602}; 590};
603#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */ 591#endif /* defined(CONFIG_PNP) */
604 592
605static int pnp_registered, isa_registered; 593static int pnp_registered, isa_registered;
606 594
@@ -624,7 +612,7 @@ static int __init generic_NCR5380_init(void)
624 card[0] = BOARD_HP_C2502; 612 card[0] = BOARD_HP_C2502;
625 } 613 }
626 614
627#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) 615#ifdef CONFIG_PNP
628 if (!pnp_register_driver(&generic_NCR5380_pnp_driver)) 616 if (!pnp_register_driver(&generic_NCR5380_pnp_driver))
629 pnp_registered = 1; 617 pnp_registered = 1;
630#endif 618#endif
@@ -637,7 +625,7 @@ static int __init generic_NCR5380_init(void)
637 625
638static void __exit generic_NCR5380_exit(void) 626static void __exit generic_NCR5380_exit(void)
639{ 627{
640#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) 628#ifdef CONFIG_PNP
641 if (pnp_registered) 629 if (pnp_registered)
642 pnp_unregister_driver(&generic_NCR5380_pnp_driver); 630 pnp_unregister_driver(&generic_NCR5380_pnp_driver);
643#endif 631#endif
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index b175b9234458..3ce5b65ccb00 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,49 +14,28 @@
14#ifndef GENERIC_NCR5380_H 14#ifndef GENERIC_NCR5380_H
15#define GENERIC_NCR5380_H 15#define GENERIC_NCR5380_H
16 16
17#ifndef SCSI_G_NCR5380_MEM
18#define DRV_MODULE_NAME "g_NCR5380" 17#define DRV_MODULE_NAME "g_NCR5380"
19 18
20#define NCR5380_read(reg) \ 19#define NCR5380_read(reg) \
21 inb(instance->io_port + (reg)) 20 ioread8(hostdata->io + hostdata->offset + (reg))
22#define NCR5380_write(reg, value) \ 21#define NCR5380_write(reg, value) \
23 outb(value, instance->io_port + (reg)) 22 iowrite8(value, hostdata->io + hostdata->offset + (reg))
24 23
25#define NCR5380_implementation_fields \ 24#define NCR5380_implementation_fields \
25 int offset; \
26 int c400_ctl_status; \ 26 int c400_ctl_status; \
27 int c400_blk_cnt; \ 27 int c400_blk_cnt; \
28 int c400_host_buf; \ 28 int c400_host_buf; \
29 int io_width; 29 int io_width;
30 30
31#else
32/* therefore SCSI_G_NCR5380_MEM */
33#define DRV_MODULE_NAME "g_NCR5380_mmio"
34
35#define NCR53C400_mem_base 0x3880 31#define NCR53C400_mem_base 0x3880
36#define NCR53C400_host_buffer 0x3900 32#define NCR53C400_host_buffer 0x3900
37#define NCR53C400_region_size 0x3a00 33#define NCR53C400_region_size 0x3a00
38 34
39#define NCR5380_read(reg) \ 35#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len
40 readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
41 NCR53C400_mem_base + (reg))
42#define NCR5380_write(reg, value) \
43 writeb(value, ((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
44 NCR53C400_mem_base + (reg))
45
46#define NCR5380_implementation_fields \
47 void __iomem *iomem; \
48 resource_size_t iomem_size; \
49 int c400_ctl_status; \
50 int c400_blk_cnt; \
51 int c400_host_buf;
52
53#endif
54
55#define NCR5380_dma_xfer_len(instance, cmd, phase) \
56 generic_NCR5380_dma_xfer_len(instance, cmd)
57#define NCR5380_dma_recv_setup generic_NCR5380_pread 36#define NCR5380_dma_recv_setup generic_NCR5380_pread
58#define NCR5380_dma_send_setup generic_NCR5380_pwrite 37#define NCR5380_dma_send_setup generic_NCR5380_pwrite
59#define NCR5380_dma_residual(instance) (0) 38#define NCR5380_dma_residual NCR5380_dma_residual_none
60 39
61#define NCR5380_intr generic_NCR5380_intr 40#define NCR5380_intr generic_NCR5380_intr
62#define NCR5380_queue_command generic_NCR5380_queue_command 41#define NCR5380_queue_command generic_NCR5380_queue_command
@@ -73,4 +52,3 @@
73#define BOARD_HP_C2502 4 52#define BOARD_HP_C2502 4
74 53
75#endif /* GENERIC_NCR5380_H */ 54#endif /* GENERIC_NCR5380_H */
76
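
[Illustration, not part of the patch] With the merged header every register access goes through one expansion; only hostdata->offset differs between the two board families (0 for PIO, NCR53C400_mem_base for MMIO). What NCR5380_read() now expands to, written out as a sketch:

	/* Illustrative only: the single accessor path after the merge. */
	static u8 example_reg_read(struct NCR5380_hostdata *hostdata, int reg)
	{
		/* offset == 0 (PIO) or NCR53C400_mem_base == 0x3880 (MMIO) */
		return ioread8(hostdata->io + hostdata->offset + reg);
	}
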
diff --git a/drivers/scsi/g_NCR5380_mmio.c b/drivers/scsi/g_NCR5380_mmio.c
deleted file mode 100644
index 8cdde71ba0c8..000000000000
--- a/drivers/scsi/g_NCR5380_mmio.c
+++ /dev/null
@@ -1,10 +0,0 @@
1/*
2 * There is probably a nicer way to do this but this one makes
3 * pretty obvious what is happening. We rebuild the same file with
4 * different options for mmio versus pio.
5 */
6
7#define SCSI_G_NCR5380_MEM
8
9#include "g_NCR5380.c"
10
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 72c98522bd26..c0cd505a9ef7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -13,6 +13,7 @@
13#define _HISI_SAS_H_ 13#define _HISI_SAS_H_
14 14
15#include <linux/acpi.h> 15#include <linux/acpi.h>
16#include <linux/clk.h>
16#include <linux/dmapool.h> 17#include <linux/dmapool.h>
17#include <linux/mfd/syscon.h> 18#include <linux/mfd/syscon.h>
18#include <linux/module.h> 19#include <linux/module.h>
@@ -110,7 +111,7 @@ struct hisi_sas_device {
110 struct domain_device *sas_device; 111 struct domain_device *sas_device;
111 u64 attached_phy; 112 u64 attached_phy;
112 u64 device_id; 113 u64 device_id;
113 u64 running_req; 114 atomic64_t running_req;
114 u8 dev_status; 115 u8 dev_status;
115}; 116};
116 117
@@ -149,7 +150,8 @@ struct hisi_sas_hw {
149 struct domain_device *device); 150 struct domain_device *device);
150 struct hisi_sas_device *(*alloc_dev)(struct domain_device *device); 151 struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
151 void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); 152 void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
152 int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s); 153 int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id,
154 int *q, int *s);
153 void (*start_delivery)(struct hisi_hba *hisi_hba); 155 void (*start_delivery)(struct hisi_hba *hisi_hba);
154 int (*prep_ssp)(struct hisi_hba *hisi_hba, 156 int (*prep_ssp)(struct hisi_hba *hisi_hba,
155 struct hisi_sas_slot *slot, int is_tmf, 157 struct hisi_sas_slot *slot, int is_tmf,
@@ -166,6 +168,9 @@ struct hisi_sas_hw {
166 void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no); 168 void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
167 void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no); 169 void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
168 void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no); 170 void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
171 void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
172 struct sas_phy_linkrates *linkrates);
173 enum sas_linkrate (*phy_get_max_linkrate)(void);
169 void (*free_device)(struct hisi_hba *hisi_hba, 174 void (*free_device)(struct hisi_hba *hisi_hba,
170 struct hisi_sas_device *dev); 175 struct hisi_sas_device *dev);
171 int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id); 176 int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
@@ -183,6 +188,7 @@ struct hisi_hba {
183 u32 ctrl_reset_reg; 188 u32 ctrl_reset_reg;
184 u32 ctrl_reset_sts_reg; 189 u32 ctrl_reset_sts_reg;
185 u32 ctrl_clock_ena_reg; 190 u32 ctrl_clock_ena_reg;
191 u32 refclk_frequency_mhz;
186 u8 sas_addr[SAS_ADDR_SIZE]; 192 u8 sas_addr[SAS_ADDR_SIZE];
187 193
188 int n_phy; 194 int n_phy;
@@ -205,7 +211,6 @@ struct hisi_hba {
205 struct hisi_sas_port port[HISI_SAS_MAX_PHYS]; 211 struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
206 212
207 int queue_count; 213 int queue_count;
208 int queue;
209 struct hisi_sas_slot *slot_prep; 214 struct hisi_sas_slot *slot_prep;
210 215
211 struct dma_pool *sge_page_pool; 216 struct dma_pool *sge_page_pool;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 2f872f784e10..d50e9cfefd24 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -162,8 +162,8 @@ out:
162 hisi_sas_slot_task_free(hisi_hba, task, abort_slot); 162 hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
163 if (task->task_done) 163 if (task->task_done)
164 task->task_done(task); 164 task->task_done(task);
165 if (sas_dev && sas_dev->running_req) 165 if (sas_dev)
166 sas_dev->running_req--; 166 atomic64_dec(&sas_dev->running_req);
167} 167}
168 168
169static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, 169static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
@@ -232,8 +232,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
232 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); 232 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
233 if (rc) 233 if (rc)
234 goto err_out; 234 goto err_out;
235 rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue, 235 rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
236 &dlvry_queue_slot); 236 &dlvry_queue, &dlvry_queue_slot);
237 if (rc) 237 if (rc)
238 goto err_out_tag; 238 goto err_out_tag;
239 239
@@ -303,7 +303,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
303 303
304 hisi_hba->slot_prep = slot; 304 hisi_hba->slot_prep = slot;
305 305
306 sas_dev->running_req++; 306 atomic64_inc(&sas_dev->running_req);
307 ++(*pass); 307 ++(*pass);
308 308
309 return 0; 309 return 0;
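
[Illustration, not part of the patch] running_req is incremented on the submission path and decremented from completion (interrupt) context, so the patch converts it from a plain u64 to atomic64_t rather than adding a lock. A minimal sketch of the pairing, with illustrative names:

	#include <linux/atomic.h>

	static atomic64_t example_running_req = ATOMIC64_INIT(0);

	static void example_submit(void)
	{
		atomic64_inc(&example_running_req);	/* task_prep / abort exec */
	}

	static void example_complete(void)
	{
		atomic64_dec(&example_running_req);	/* slot_complete, lockless */
	}
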
@@ -369,9 +369,14 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
369 struct sas_phy *sphy = sas_phy->phy; 369 struct sas_phy *sphy = sas_phy->phy;
370 370
371 sphy->negotiated_linkrate = sas_phy->linkrate; 371 sphy->negotiated_linkrate = sas_phy->linkrate;
372 sphy->minimum_linkrate = phy->minimum_linkrate;
373 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 372 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
374 sphy->maximum_linkrate = phy->maximum_linkrate; 373 sphy->maximum_linkrate_hw =
374 hisi_hba->hw->phy_get_max_linkrate();
375 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
376 sphy->minimum_linkrate = phy->minimum_linkrate;
377
378 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
379 sphy->maximum_linkrate = phy->maximum_linkrate;
375 } 380 }
376 381
377 if (phy->phy_type & PORT_TYPE_SAS) { 382 if (phy->phy_type & PORT_TYPE_SAS) {
@@ -537,7 +542,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
537 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 542 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
538 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 543 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
539 struct asd_sas_port *sas_port = sas_phy->port; 544 struct asd_sas_port *sas_port = sas_phy->port;
540 struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id]; 545 struct hisi_sas_port *port = &hisi_hba->port[phy->port_id];
541 unsigned long flags; 546 unsigned long flags;
542 547
543 if (!sas_port) 548 if (!sas_port)
@@ -645,6 +650,9 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
645 break; 650 break;
646 651
647 case PHY_FUNC_SET_LINK_RATE: 652 case PHY_FUNC_SET_LINK_RATE:
653 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
654 break;
655
648 case PHY_FUNC_RELEASE_SPINUP_HOLD: 656 case PHY_FUNC_RELEASE_SPINUP_HOLD:
649 default: 657 default:
650 return -EOPNOTSUPP; 658 return -EOPNOTSUPP;
@@ -764,7 +772,8 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
764 task = NULL; 772 task = NULL;
765 } 773 }
766ex_err: 774ex_err:
767 WARN_ON(retry == TASK_RETRY); 775 if (retry == TASK_RETRY)
776 dev_warn(dev, "abort tmf: executing internal task failed!\n");
768 sas_free_task(task); 777 sas_free_task(task);
769 return res; 778 return res;
770} 779}
@@ -960,6 +969,9 @@ static int hisi_sas_query_task(struct sas_task *task)
960 case TMF_RESP_FUNC_FAILED: 969 case TMF_RESP_FUNC_FAILED:
961 case TMF_RESP_FUNC_COMPLETE: 970 case TMF_RESP_FUNC_COMPLETE:
962 break; 971 break;
972 default:
973 rc = TMF_RESP_FUNC_FAILED;
974 break;
963 } 975 }
964 } 976 }
965 return rc; 977 return rc;
@@ -987,8 +999,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
987 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); 999 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
988 if (rc) 1000 if (rc)
989 goto err_out; 1001 goto err_out;
990 rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue, 1002 rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
991 &dlvry_queue_slot); 1003 &dlvry_queue, &dlvry_queue_slot);
992 if (rc) 1004 if (rc)
993 goto err_out_tag; 1005 goto err_out_tag;
994 1006
@@ -1023,7 +1035,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
1023 1035
1024 hisi_hba->slot_prep = slot; 1036 hisi_hba->slot_prep = slot;
1025 1037
1026 sas_dev->running_req++; 1038 atomic64_inc(&sas_dev->running_req);
1039
1027 /* send abort command to our chip */ 1040 /* send abort command to our chip */
1028 hisi_hba->hw->start_delivery(hisi_hba); 1041 hisi_hba->hw->start_delivery(hisi_hba);
1029 1042
@@ -1396,10 +1409,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
1396 struct hisi_hba *hisi_hba; 1409 struct hisi_hba *hisi_hba;
1397 struct device *dev = &pdev->dev; 1410 struct device *dev = &pdev->dev;
1398 struct device_node *np = pdev->dev.of_node; 1411 struct device_node *np = pdev->dev.of_node;
1412 struct clk *refclk;
1399 1413
1400 shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba)); 1414 shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
1401 if (!shost) 1415 if (!shost) {
1402 goto err_out; 1416 dev_err(dev, "scsi host alloc failed\n");
1417 return NULL;
1418 }
1403 hisi_hba = shost_priv(shost); 1419 hisi_hba = shost_priv(shost);
1404 1420
1405 hisi_hba->hw = hw; 1421 hisi_hba->hw = hw;
@@ -1432,6 +1448,12 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
1432 goto err_out; 1448 goto err_out;
1433 } 1449 }
1434 1450
1451 refclk = devm_clk_get(&pdev->dev, NULL);
1452 if (IS_ERR(refclk))
1453 dev_info(dev, "no ref clk property\n");
1454 else
1455 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
1456
1435 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) 1457 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
1436 goto err_out; 1458 goto err_out;
1437 1459
@@ -1457,6 +1479,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
1457 1479
1458 return shost; 1480 return shost;
1459err_out: 1481err_out:
1482 kfree(shost);
1460 dev_err(dev, "shost alloc failed\n"); 1483 dev_err(dev, "shost alloc failed\n");
1461 return NULL; 1484 return NULL;
1462} 1485}
@@ -1483,10 +1506,8 @@ int hisi_sas_probe(struct platform_device *pdev,
1483 int rc, phy_nr, port_nr, i; 1506 int rc, phy_nr, port_nr, i;
1484 1507
1485 shost = hisi_sas_shost_alloc(pdev, hw); 1508 shost = hisi_sas_shost_alloc(pdev, hw);
1486 if (!shost) { 1509 if (!shost)
1487 rc = -ENOMEM; 1510 return -ENOMEM;
1488 goto err_out_ha;
1489 }
1490 1511
1491 sha = SHOST_TO_SAS_HA(shost); 1512 sha = SHOST_TO_SAS_HA(shost);
1492 hisi_hba = shost_priv(shost); 1513 hisi_hba = shost_priv(shost);
@@ -1496,12 +1517,13 @@ int hisi_sas_probe(struct platform_device *pdev,
1496 1517
1497 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 1518 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
1498 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 1519 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
1499 if (!arr_phy || !arr_port) 1520 if (!arr_phy || !arr_port) {
1500 return -ENOMEM; 1521 rc = -ENOMEM;
1522 goto err_out_ha;
1523 }
1501 1524
1502 sha->sas_phy = arr_phy; 1525 sha->sas_phy = arr_phy;
1503 sha->sas_port = arr_port; 1526 sha->sas_port = arr_port;
1504 sha->core.shost = shost;
1505 sha->lldd_ha = hisi_hba; 1527 sha->lldd_ha = hisi_hba;
1506 1528
1507 shost->transportt = hisi_sas_stt; 1529 shost->transportt = hisi_sas_stt;
@@ -1546,6 +1568,7 @@ int hisi_sas_probe(struct platform_device *pdev,
1546err_out_register_ha: 1568err_out_register_ha:
1547 scsi_remove_host(shost); 1569 scsi_remove_host(shost);
1548err_out_ha: 1570err_out_ha:
1571 hisi_sas_free(hisi_hba);
1549 kfree(shost); 1572 kfree(shost);
1550 return rc; 1573 return rc;
1551} 1574}
@@ -1555,12 +1578,14 @@ int hisi_sas_remove(struct platform_device *pdev)
1555{ 1578{
1556 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 1579 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
1557 struct hisi_hba *hisi_hba = sha->lldd_ha; 1580 struct hisi_hba *hisi_hba = sha->lldd_ha;
1581 struct Scsi_Host *shost = sha->core.shost;
1558 1582
1559 scsi_remove_host(sha->core.shost); 1583 scsi_remove_host(sha->core.shost);
1560 sas_unregister_ha(sha); 1584 sas_unregister_ha(sha);
1561 sas_remove_host(sha->core.shost); 1585 sas_remove_host(sha->core.shost);
1562 1586
1563 hisi_sas_free(hisi_hba); 1587 hisi_sas_free(hisi_hba);
1588 kfree(shost);
1564 return 0; 1589 return 0;
1565} 1590}
1566EXPORT_SYMBOL_GPL(hisi_sas_remove); 1591EXPORT_SYMBOL_GPL(hisi_sas_remove);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index c0ac49d8bc8d..8a1be0ba8a22 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -843,6 +843,49 @@ static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
843 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); 843 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
844} 844}
845 845
846static enum sas_linkrate phy_get_max_linkrate_v1_hw(void)
847{
848 return SAS_LINK_RATE_6_0_GBPS;
849}
850
851static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no,
852 struct sas_phy_linkrates *r)
853{
854 u32 prog_phy_link_rate =
855 hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
856 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
857 struct asd_sas_phy *sas_phy = &phy->sas_phy;
858 int i;
859 enum sas_linkrate min, max;
860 u32 rate_mask = 0;
861
862 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
863 max = sas_phy->phy->maximum_linkrate;
864 min = r->minimum_linkrate;
865 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
866 max = r->maximum_linkrate;
867 min = sas_phy->phy->minimum_linkrate;
868 } else
869 return;
870
871 sas_phy->phy->maximum_linkrate = max;
872 sas_phy->phy->minimum_linkrate = min;
873
874 min -= SAS_LINK_RATE_1_5_GBPS;
875 max -= SAS_LINK_RATE_1_5_GBPS;
876
877 for (i = 0; i <= max; i++)
878 rate_mask |= 1 << (i * 2);
879
880 prog_phy_link_rate &= ~0xff;
881 prog_phy_link_rate |= rate_mask;
882
883 hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
884 prog_phy_link_rate);
885
886 phy_hard_reset_v1_hw(hisi_hba, phy_no);
887}
888
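
[Illustration, not part of the patch] The mask arithmetic above allocates two PROG_PHY_LINK_RATE bits per rate step starting at 1.5 Gbps and sets the low bit of each pair. A worked example, assuming the sas_linkrate enum values from include/scsi/sas.h (1.5G = 8, 3.0G = 9, 6.0G = 10):

	/* max = SAS_LINK_RATE_6_0_GBPS: i runs 0..2, mask = 0b010101 = 0x15 */
	static u32 example_rate_mask(enum sas_linkrate max)
	{
		u32 mask = 0;
		int i;

		for (i = 0; i <= max - SAS_LINK_RATE_1_5_GBPS; i++)
			mask |= 1 << (i * 2);
		return mask;
	}
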
846static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id) 889static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
847{ 890{
848 int i, bitmap = 0; 891 int i, bitmap = 0;
@@ -862,29 +905,23 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
862 * The call path to this function and up to writing the write 905 * The call path to this function and up to writing the write
863 * queue pointer should be safe from interruption. 906 * queue pointer should be safe from interruption.
864 */ 907 */
865static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s) 908static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
909 int *q, int *s)
866{ 910{
867 struct device *dev = &hisi_hba->pdev->dev; 911 struct device *dev = &hisi_hba->pdev->dev;
868 struct hisi_sas_dq *dq; 912 struct hisi_sas_dq *dq;
869 u32 r, w; 913 u32 r, w;
870 int queue = hisi_hba->queue; 914 int queue = dev_id % hisi_hba->queue_count;
871 915
872 while (1) { 916 dq = &hisi_hba->dq[queue];
873 dq = &hisi_hba->dq[queue]; 917 w = dq->wr_point;
874 w = dq->wr_point; 918 r = hisi_sas_read32_relaxed(hisi_hba,
875 r = hisi_sas_read32_relaxed(hisi_hba, 919 DLVRY_Q_0_RD_PTR + (queue * 0x14));
876 DLVRY_Q_0_RD_PTR + (queue * 0x14)); 920 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
877 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { 921 dev_warn(dev, "could not find free slot\n");
878 queue = (queue + 1) % hisi_hba->queue_count; 922 return -EAGAIN;
879 if (queue == hisi_hba->queue) {
880 dev_warn(dev, "could not find free slot\n");
881 return -EAGAIN;
882 }
883 continue;
884 }
885 break;
886 } 923 }
887 hisi_hba->queue = (queue + 1) % hisi_hba->queue_count; 924
888 *q = queue; 925 *q = queue;
889 *s = w; 926 *s = w;
890 return 0; 927 return 0;
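
[Illustration, not part of the patch] The round-robin scan over all delivery queues is gone: the queue is now a pure function of the device id, so commands for one device never interleave across queues, and a full ring returns -EAGAIN instead of spilling onto another queue. The mapping itself reduces to:

	/* Commands for one device always land on the same delivery queue. */
	static int example_pick_queue(u32 dev_id, int queue_count)
	{
		return dev_id % queue_count;
	}
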
@@ -1372,8 +1409,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
1372 } 1409 }
1373 1410
1374out: 1411out:
1375 if (sas_dev && sas_dev->running_req) 1412 if (sas_dev)
1376 sas_dev->running_req--; 1413 atomic64_dec(&sas_dev->running_req);
1377 1414
1378 hisi_sas_slot_task_free(hisi_hba, task, slot); 1415 hisi_sas_slot_task_free(hisi_hba, task, slot);
1379 sts = ts->stat; 1416 sts = ts->stat;
@@ -1824,6 +1861,8 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
1824 .phy_enable = enable_phy_v1_hw, 1861 .phy_enable = enable_phy_v1_hw,
1825 .phy_disable = disable_phy_v1_hw, 1862 .phy_disable = disable_phy_v1_hw,
1826 .phy_hard_reset = phy_hard_reset_v1_hw, 1863 .phy_hard_reset = phy_hard_reset_v1_hw,
1864 .phy_set_linkrate = phy_set_linkrate_v1_hw,
1865 .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw,
1827 .get_wideport_bitmap = get_wideport_bitmap_v1_hw, 1866 .get_wideport_bitmap = get_wideport_bitmap_v1_hw,
1828 .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW, 1867 .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
1829 .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr), 1868 .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9825a3f49f53..b934aec1eebb 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -55,10 +55,44 @@
55#define HGC_DFX_CFG2 0xc0 55#define HGC_DFX_CFG2 0xc0
56#define HGC_IOMB_PROC1_STATUS 0x104 56#define HGC_IOMB_PROC1_STATUS 0x104
57#define CFG_1US_TIMER_TRSH 0xcc 57#define CFG_1US_TIMER_TRSH 0xcc
58#define HGC_LM_DFX_STATUS2 0x128
59#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0
60#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
61 HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
62#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12
63#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
64 HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
65#define HGC_CQE_ECC_ADDR 0x13c
66#define HGC_CQE_ECC_1B_ADDR_OFF 0
67#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
68#define HGC_CQE_ECC_MB_ADDR_OFF 8
69#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
70#define HGC_IOST_ECC_ADDR 0x140
71#define HGC_IOST_ECC_1B_ADDR_OFF 0
72#define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
73#define HGC_IOST_ECC_MB_ADDR_OFF 16
74#define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
75#define HGC_DQE_ECC_ADDR 0x144
76#define HGC_DQE_ECC_1B_ADDR_OFF 0
77#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
78#define HGC_DQE_ECC_MB_ADDR_OFF 16
79#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
58#define HGC_INVLD_DQE_INFO 0x148 80#define HGC_INVLD_DQE_INFO 0x148
59#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9 81#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9
60#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF) 82#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
61#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18 83#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18
84#define HGC_ITCT_ECC_ADDR 0x150
85#define HGC_ITCT_ECC_1B_ADDR_OFF 0
86#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \
87 HGC_ITCT_ECC_1B_ADDR_OFF)
88#define HGC_ITCT_ECC_MB_ADDR_OFF 16
89#define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \
90 HGC_ITCT_ECC_MB_ADDR_OFF)
91#define HGC_AXI_FIFO_ERR_INFO 0x154
92#define AXI_ERR_INFO_OFF 0
93#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
94#define FIFO_ERR_INFO_OFF 8
95#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
62#define INT_COAL_EN 0x19c 96#define INT_COAL_EN 0x19c
63#define OQ_INT_COAL_TIME 0x1a0 97#define OQ_INT_COAL_TIME 0x1a0
64#define OQ_INT_COAL_CNT 0x1a4 98#define OQ_INT_COAL_CNT 0x1a4
@@ -73,13 +107,41 @@
73#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) 107#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
74#define ENT_INT_SRC2 0x1bc 108#define ENT_INT_SRC2 0x1bc
75#define ENT_INT_SRC3 0x1c0 109#define ENT_INT_SRC3 0x1c0
110#define ENT_INT_SRC3_WP_DEPTH_OFF 8
111#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
112#define ENT_INT_SRC3_RP_DEPTH_OFF 10
113#define ENT_INT_SRC3_AXI_OFF 11
114#define ENT_INT_SRC3_FIFO_OFF 12
115#define ENT_INT_SRC3_LM_OFF 14
76#define ENT_INT_SRC3_ITC_INT_OFF 15 116#define ENT_INT_SRC3_ITC_INT_OFF 15
77#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) 117#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
118#define ENT_INT_SRC3_ABT_OFF 16
78#define ENT_INT_SRC_MSK1 0x1c4 119#define ENT_INT_SRC_MSK1 0x1c4
79#define ENT_INT_SRC_MSK2 0x1c8 120#define ENT_INT_SRC_MSK2 0x1c8
80#define ENT_INT_SRC_MSK3 0x1cc 121#define ENT_INT_SRC_MSK3 0x1cc
81#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 122#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
82#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) 123#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
124#define SAS_ECC_INTR 0x1e8
125#define SAS_ECC_INTR_DQE_ECC_1B_OFF 0
126#define SAS_ECC_INTR_DQE_ECC_MB_OFF 1
127#define SAS_ECC_INTR_IOST_ECC_1B_OFF 2
128#define SAS_ECC_INTR_IOST_ECC_MB_OFF 3
129#define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4
130#define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5
131#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6
132#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7
133#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8
134#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9
135#define SAS_ECC_INTR_CQE_ECC_1B_OFF 10
136#define SAS_ECC_INTR_CQE_ECC_MB_OFF 11
137#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12
138#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13
139#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14
140#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15
141#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16
142#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17
143#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18
144#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19
83#define SAS_ECC_INTR_MSK 0x1ec 145#define SAS_ECC_INTR_MSK 0x1ec
84#define HGC_ERR_STAT_EN 0x238 146#define HGC_ERR_STAT_EN 0x238
85#define DLVRY_Q_0_BASE_ADDR_LO 0x260 147#define DLVRY_Q_0_BASE_ADDR_LO 0x260
@@ -94,7 +156,20 @@
94#define COMPL_Q_0_DEPTH 0x4e8 156#define COMPL_Q_0_DEPTH 0x4e8
95#define COMPL_Q_0_WR_PTR 0x4ec 157#define COMPL_Q_0_WR_PTR 0x4ec
96#define COMPL_Q_0_RD_PTR 0x4f0 158#define COMPL_Q_0_RD_PTR 0x4f0
97 159#define HGC_RXM_DFX_STATUS14 0xae8
160#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0
161#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \
162 HGC_RXM_DFX_STATUS14_MEM0_OFF)
163#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9
164#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \
165 HGC_RXM_DFX_STATUS14_MEM1_OFF)
166#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18
167#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \
168 HGC_RXM_DFX_STATUS14_MEM2_OFF)
169#define HGC_RXM_DFX_STATUS15 0xaec
170#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0
171#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \
172 HGC_RXM_DFX_STATUS15_MEM3_OFF)
98/* phy registers need init */ 173/* phy registers need init */
99#define PORT_BASE (0x2000) 174#define PORT_BASE (0x2000)
100 175
@@ -119,6 +194,9 @@
119#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) 194#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
120#define SL_CONTROL_CTA_OFF 17 195#define SL_CONTROL_CTA_OFF 17
121#define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF) 196#define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF)
197#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
198#define RX_BCAST_CHG_OFF 1
199#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
122#define TX_ID_DWORD0 (PORT_BASE + 0x9c) 200#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
123#define TX_ID_DWORD1 (PORT_BASE + 0xa0) 201#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
124#define TX_ID_DWORD2 (PORT_BASE + 0xa4) 202#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
@@ -267,6 +345,8 @@
267#define ITCT_HDR_RTOLT_OFF 48 345#define ITCT_HDR_RTOLT_OFF 48
268#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) 346#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
269 347
348#define HISI_SAS_FATAL_INT_NR 2
349
270struct hisi_sas_complete_v2_hdr { 350struct hisi_sas_complete_v2_hdr {
271 __le32 dw0; 351 __le32 dw0;
272 __le32 dw1; 352 __le32 dw1;
@@ -659,8 +739,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
659 qw0 &= ~(1 << ITCT_HDR_VALID_OFF); 739 qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
660 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 740 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
661 ENT_INT_SRC3_ITC_INT_MSK); 741 ENT_INT_SRC3_ITC_INT_MSK);
662 hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
663 hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
664 742
665 /* clear the itct */ 743 /* clear the itct */
666 hisi_sas_write32(hisi_hba, ITCT_CLR, 0); 744 hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
@@ -808,7 +886,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
808 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe); 886 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
809 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe); 887 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
810 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe); 888 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
811 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0); 889 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
812 for (i = 0; i < hisi_hba->queue_count; i++) 890 for (i = 0; i < hisi_hba->queue_count; i++)
813 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); 891 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
814 892
@@ -824,7 +902,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
824 hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10); 902 hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
825 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); 903 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
826 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); 904 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
827 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); 905 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
828 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); 906 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
829 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); 907 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
830 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); 908 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
@@ -836,7 +914,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
836 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); 914 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
837 hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0); 915 hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
838 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0); 916 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
839 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694); 917 if (hisi_hba->refclk_frequency_mhz == 66)
918 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
919 /* else do nothing: leave PHY_CTRL at its existing value */
840 } 920 }
841 921
842 for (i = 0; i < hisi_hba->queue_count; i++) { 922 for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -980,6 +1060,49 @@ static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
980 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); 1060 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
981} 1061}
982 1062
1063static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
1064{
1065 return SAS_LINK_RATE_12_0_GBPS;
1066}
1067
1068static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
1069 struct sas_phy_linkrates *r)
1070{
1071 u32 prog_phy_link_rate =
1072 hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
1073 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1074 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1075 int i;
1076 enum sas_linkrate min, max;
1077 u32 rate_mask = 0;
1078
1079 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1080 max = sas_phy->phy->maximum_linkrate;
1081 min = r->minimum_linkrate;
1082 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1083 max = r->maximum_linkrate;
1084 min = sas_phy->phy->minimum_linkrate;
1085 } else
1086 return;
1087
1088 sas_phy->phy->maximum_linkrate = max;
1089 sas_phy->phy->minimum_linkrate = min;
1090
1091 min -= SAS_LINK_RATE_1_5_GBPS;
1092 max -= SAS_LINK_RATE_1_5_GBPS;
1093
1094 for (i = 0; i <= max; i++)
1095 rate_mask |= 1 << (i * 2);
1096
1097 prog_phy_link_rate &= ~0xff;
1098 prog_phy_link_rate |= rate_mask;
1099
1100 hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
1101 prog_phy_link_rate);
1102
1103 phy_hard_reset_v2_hw(hisi_hba, phy_no);
1104}
1105
983static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) 1106static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
984{ 1107{
985 int i, bitmap = 0; 1108 int i, bitmap = 0;
@@ -1010,29 +1133,24 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
1010 * The call path to this function and up to writing the write 1133 * The call path to this function and up to writing the write
1011 * queue pointer should be safe from interruption. 1134 * queue pointer should be safe from interruption.
1012 */ 1135 */
1013static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s) 1136static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
1137 int *q, int *s)
1014{ 1138{
1015 struct device *dev = &hisi_hba->pdev->dev; 1139 struct device *dev = &hisi_hba->pdev->dev;
1016 struct hisi_sas_dq *dq; 1140 struct hisi_sas_dq *dq;
1017 u32 r, w; 1141 u32 r, w;
1018 int queue = hisi_hba->queue; 1142 int queue = dev_id % hisi_hba->queue_count;
1019 1143
1020 while (1) { 1144 dq = &hisi_hba->dq[queue];
1021 dq = &hisi_hba->dq[queue]; 1145 w = dq->wr_point;
1022 w = dq->wr_point; 1146 r = hisi_sas_read32_relaxed(hisi_hba,
1023 r = hisi_sas_read32_relaxed(hisi_hba, 1147 DLVRY_Q_0_RD_PTR + (queue * 0x14));
1024 DLVRY_Q_0_RD_PTR + (queue * 0x14)); 1148 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
1025 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { 1149 dev_warn(dev, "full queue=%d r=%d w=%d\n",
1026 queue = (queue + 1) % hisi_hba->queue_count; 1150 queue, r, w);
1027 if (queue == hisi_hba->queue) { 1151 return -EAGAIN;
1028 dev_warn(dev, "could not find free slot\n");
1029 return -EAGAIN;
1030 }
1031 continue;
1032 }
1033 break;
1034 } 1152 }
1035 hisi_hba->queue = (queue + 1) % hisi_hba->queue_count; 1153
1036 *q = queue; 1154 *q = queue;
1037 *s = w; 1155 *s = w;
1038 return 0; 1156 return 0;
@@ -1653,8 +1771,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
1653 } 1771 }
1654 1772
1655out: 1773out:
1656 if (sas_dev && sas_dev->running_req) 1774 if (sas_dev)
1657 sas_dev->running_req--; 1775 atomic64_dec(&sas_dev->running_req);
1658 1776
1659 hisi_sas_slot_task_free(hisi_hba, task, slot); 1777 hisi_sas_slot_task_free(hisi_hba, task, slot);
1660 sts = ts->stat; 1778 sts = ts->stat;
@@ -1675,6 +1793,7 @@ static u8 get_ata_protocol(u8 cmd, int direction)
1675 case ATA_CMD_NCQ_NON_DATA: 1793 case ATA_CMD_NCQ_NON_DATA:
1676 return SATA_PROTOCOL_FPDMA; 1794 return SATA_PROTOCOL_FPDMA;
1677 1795
1796 case ATA_CMD_DOWNLOAD_MICRO:
1678 case ATA_CMD_ID_ATA: 1797 case ATA_CMD_ID_ATA:
1679 case ATA_CMD_PMP_READ: 1798 case ATA_CMD_PMP_READ:
1680 case ATA_CMD_READ_LOG_EXT: 1799 case ATA_CMD_READ_LOG_EXT:
@@ -1686,18 +1805,27 @@ static u8 get_ata_protocol(u8 cmd, int direction)
1686 case ATA_CMD_PIO_WRITE_EXT: 1805 case ATA_CMD_PIO_WRITE_EXT:
1687 return SATA_PROTOCOL_PIO; 1806 return SATA_PROTOCOL_PIO;
1688 1807
1808 case ATA_CMD_DSM:
1809 case ATA_CMD_DOWNLOAD_MICRO_DMA:
1810 case ATA_CMD_PMP_READ_DMA:
1811 case ATA_CMD_PMP_WRITE_DMA:
1689 case ATA_CMD_READ: 1812 case ATA_CMD_READ:
1690 case ATA_CMD_READ_EXT: 1813 case ATA_CMD_READ_EXT:
1691 case ATA_CMD_READ_LOG_DMA_EXT: 1814 case ATA_CMD_READ_LOG_DMA_EXT:
1815 case ATA_CMD_READ_STREAM_DMA_EXT:
1816 case ATA_CMD_TRUSTED_RCV_DMA:
1817 case ATA_CMD_TRUSTED_SND_DMA:
1692 case ATA_CMD_WRITE: 1818 case ATA_CMD_WRITE:
1693 case ATA_CMD_WRITE_EXT: 1819 case ATA_CMD_WRITE_EXT:
1820 case ATA_CMD_WRITE_FUA_EXT:
1694 case ATA_CMD_WRITE_QUEUED: 1821 case ATA_CMD_WRITE_QUEUED:
1695 case ATA_CMD_WRITE_LOG_DMA_EXT: 1822 case ATA_CMD_WRITE_LOG_DMA_EXT:
1823 case ATA_CMD_WRITE_STREAM_DMA_EXT:
1696 return SATA_PROTOCOL_DMA; 1824 return SATA_PROTOCOL_DMA;
1697 1825
1698 case ATA_CMD_DOWNLOAD_MICRO:
1699 case ATA_CMD_DEV_RESET:
1700 case ATA_CMD_CHK_POWER: 1826 case ATA_CMD_CHK_POWER:
1827 case ATA_CMD_DEV_RESET:
1828 case ATA_CMD_EDD:
1701 case ATA_CMD_FLUSH: 1829 case ATA_CMD_FLUSH:
1702 case ATA_CMD_FLUSH_EXT: 1830 case ATA_CMD_FLUSH_EXT:
1703 case ATA_CMD_VERIFY: 1831 case ATA_CMD_VERIFY:
@@ -1970,9 +2098,12 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
1970 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2098 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1971 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2099 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1972 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 2100 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
2101 u32 bcast_status;
1973 2102
1974 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); 2103 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
1975 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); 2104 bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
2105 if (bcast_status & RX_BCAST_CHG_MSK)
2106 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
1976 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 2107 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1977 CHL_INT0_SL_RX_BCST_ACK_MSK); 2108 CHL_INT0_SL_RX_BCST_ACK_MSK);
1978 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); 2109 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
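
[Illustration, not part of the patch] phy_bcast_v2_hw() now qualifies the interrupt against RX_PRIMS_STATUS, so libsas is only asked to revalidate the domain on a genuine BROADCAST (CHANGE) primitive rather than on every broadcast ack. The check in isolation, as a sketch using the register names added above:

	static bool example_bcast_is_change(struct hisi_hba *hisi_hba, int phy_no)
	{
		u32 status = hisi_sas_phy_read32(hisi_hba, phy_no,
						 RX_PRIMS_STATUS);

		return status & RX_BCAST_CHG_MSK;
	}
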
@@ -2005,8 +2136,9 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
2005 if (irq_value1) { 2136 if (irq_value1) {
2006 if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK | 2137 if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
2007 CHL_INT1_DMAC_TX_ECC_ERR_MSK)) 2138 CHL_INT1_DMAC_TX_ECC_ERR_MSK))
2008 panic("%s: DMAC RX/TX ecc bad error! (0x%x)", 2139 panic("%s: DMAC RX/TX ecc bad error!\
2009 dev_name(dev), irq_value1); 2140 (0x%x)",
2141 dev_name(dev), irq_value1);
2010 2142
2011 hisi_sas_phy_write32(hisi_hba, phy_no, 2143 hisi_sas_phy_write32(hisi_hba, phy_no,
2012 CHL_INT1, irq_value1); 2144 CHL_INT1, irq_value1);
@@ -2037,6 +2169,318 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
2037 return IRQ_HANDLED; 2169 return IRQ_HANDLED;
2038} 2170}
2039 2171
2172static void
2173one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
2174{
2175 struct device *dev = &hisi_hba->pdev->dev;
2176 u32 reg_val;
2177
2178 if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
2179 reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
2180 dev_warn(dev, "hgc_dqe_acc1b_intr found: \
2181 Ram address is 0x%08X\n",
2182 (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >>
2183 HGC_DQE_ECC_1B_ADDR_OFF);
2184 }
2185
2186 if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) {
2187 reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
2188 dev_warn(dev, "hgc_iost_acc1b_intr found: \
2189 Ram address is 0x%08X\n",
2190 (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >>
2191 HGC_IOST_ECC_1B_ADDR_OFF);
2192 }
2193
2194 if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) {
2195 reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
2196 dev_warn(dev, "hgc_itct_acc1b_intr found: \
2197 Ram address is 0x%08X\n",
2198 (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >>
2199 HGC_ITCT_ECC_1B_ADDR_OFF);
2200 }
2201
2202 if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) {
2203 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
2204 dev_warn(dev, "hgc_iostl_acc1b_intr found: \
2205 memory address is 0x%08X\n",
2206 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
2207 HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
2208 }
2209
2210 if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) {
2211 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
2212 dev_warn(dev, "hgc_itctl_acc1b_intr found: \
2213 memory address is 0x%08X\n",
2214 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
2215 HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
2216 }
2217
2218 if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) {
2219 reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
2220 dev_warn(dev, "hgc_cqe_acc1b_intr found: \
2221 Ram address is 0x%08X\n",
2222 (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >>
2223 HGC_CQE_ECC_1B_ADDR_OFF);
2224 }
2225
2226 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) {
2227 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2228 dev_warn(dev, "rxm_mem0_acc1b_intr found: \
2229 memory address is 0x%08X\n",
2230 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
2231 HGC_RXM_DFX_STATUS14_MEM0_OFF);
2232 }
2233
2234 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) {
2235 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2236 dev_warn(dev, "rxm_mem1_acc1b_intr found: \
2237 memory address is 0x%08X\n",
2238 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
2239 HGC_RXM_DFX_STATUS14_MEM1_OFF);
2240 }
2241
2242 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) {
2243 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2244 dev_warn(dev, "rxm_mem2_acc1b_intr found: \
2245 memory address is 0x%08X\n",
2246 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
2247 HGC_RXM_DFX_STATUS14_MEM2_OFF);
2248 }
2249
2250 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) {
2251 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
2252 dev_warn(dev, "rxm_mem3_acc1b_intr found: \
2253 memory address is 0x%08X\n",
2254 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
2255 HGC_RXM_DFX_STATUS15_MEM3_OFF);
2256 }
2257
2258}
2259
2260static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
2261 u32 irq_value)
2262{
2263 u32 reg_val;
2264 struct device *dev = &hisi_hba->pdev->dev;
2265
2266 if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
2267 reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
2268 panic("%s: hgc_dqe_accbad_intr (0x%x) found: \
2269 Ram address is 0x%08X\n",
2270 dev_name(dev), irq_value,
2271 (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
2272 HGC_DQE_ECC_MB_ADDR_OFF);
2273 }
2274
2275 if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
2276 reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
2277 panic("%s: hgc_iost_accbad_intr (0x%x) found: \
2278 Ram address is 0x%08X\n",
2279 dev_name(dev), irq_value,
2280 (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
2281 HGC_IOST_ECC_MB_ADDR_OFF);
2282 }
2283
2284 if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
2285 reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
2286 panic("%s: hgc_itct_accbad_intr (0x%x) found: \
2287 Ram address is 0x%08X\n",
2288 dev_name(dev), irq_value,
2289 (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
2290 HGC_ITCT_ECC_MB_ADDR_OFF);
2291 }
2292
2293 if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
2294 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
2295 panic("%s: hgc_iostl_accbad_intr (0x%x) found: \
2296 memory address is 0x%08X\n",
2297 dev_name(dev), irq_value,
2298 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
2299 HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
2300 }
2301
2302 if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
2303 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
2304 panic("%s: hgc_itctl_accbad_intr (0x%x) found: \
2305 memory address is 0x%08X\n",
2306 dev_name(dev), irq_value,
2307 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
2308 HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
2309 }
2310
2311 if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
2312 reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
2313 panic("%s: hgc_cqe_accbad_intr (0x%x) found: \
2314 Ram address is 0x%08X\n",
2315 dev_name(dev), irq_value,
2316 (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
2317 HGC_CQE_ECC_MB_ADDR_OFF);
2318 }
2319
2320 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
2321 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2322 panic("%s: rxm_mem0_accbad_intr (0x%x) found: \
2323 memory address is 0x%08X\n",
2324 dev_name(dev), irq_value,
2325 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
2326 HGC_RXM_DFX_STATUS14_MEM0_OFF);
2327 }
2328
2329 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
2330 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2331 panic("%s: rxm_mem1_accbad_intr (0x%x) found: \
2332 memory address is 0x%08X\n",
2333 dev_name(dev), irq_value,
2334 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
2335 HGC_RXM_DFX_STATUS14_MEM1_OFF);
2336 }
2337
2338 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
2339 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2340 panic("%s: rxm_mem2_accbad_intr (0x%x) found: \
2341 memory address is 0x%08X\n",
2342 dev_name(dev), irq_value,
2343 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
2344 HGC_RXM_DFX_STATUS14_MEM2_OFF);
2345 }
2346
2347 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
2348 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
2349 panic("%s: rxm_mem3_accbad_intr (0x%x) found: \
2350 memory address is 0x%08X\n",
2351 dev_name(dev), irq_value,
2352 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
2353 HGC_RXM_DFX_STATUS15_MEM3_OFF);
2354 }
2355
2356}
2357
2358static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
2359{
2360 struct hisi_hba *hisi_hba = p;
2361 u32 irq_value, irq_msk;
2362
2363 irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
2364 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
2365
2366 irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
2367 if (irq_value) {
2368 one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
2369 multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
2370 }
2371
2372 hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
2373 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
2374
2375 return IRQ_HANDLED;
2376}
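fatal_ecc_int_v2_hw() follows the usual fatal-interrupt shape: mask every ECC source, read the pending bits, let the one-bit path warn and the multi-bit path panic, then write-1-to-clear the status and restore the old mask. A toy userspace model of that sequence (register names and indices are stand-ins, not the real hip06 registers):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];
enum { ECC_INTR, ECC_INTR_MSK };

static uint32_t rd32(int r)             { return regs[r]; }
static void     wr32(int r, uint32_t v) { regs[r] = v; }

static void fatal_ecc_irq(void)
{
	uint32_t msk = rd32(ECC_INTR_MSK);

	wr32(ECC_INTR_MSK, msk | 0xffffffff);	/* mask all sources */

	uint32_t pending = rd32(ECC_INTR);
	if (pending)
		printf("handling ECC sources 0x%08X\n", pending);

	wr32(ECC_INTR, pending);		/* write-1-to-clear */
	wr32(ECC_INTR_MSK, msk);		/* restore old mask */
}

int main(void)
{
	regs[ECC_INTR] = 0x5;	/* pretend two sources fired */
	fatal_ecc_irq();
	return 0;
}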
2377
2378#define AXI_ERR_NR 8
2379static const char axi_err_info[AXI_ERR_NR][32] = {
2380 "IOST_AXI_W_ERR",
2381 "IOST_AXI_R_ERR",
2382 "ITCT_AXI_W_ERR",
2383 "ITCT_AXI_R_ERR",
2384 "SATA_AXI_W_ERR",
2385 "SATA_AXI_R_ERR",
2386 "DQE_AXI_R_ERR",
2387 "CQE_AXI_W_ERR"
2388};
2389
2390#define FIFO_ERR_NR 5
2391static const char fifo_err_info[FIFO_ERR_NR][32] = {
2392 "CQE_WINFO_FIFO",
2393 "CQE_MSG_FIFIO",
2394 "GETDQE_FIFO",
2395 "CMDP_FIFO",
2396 "AWTCTRL_FIFO"
2397};
2398
2399static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
2400{
2401 struct hisi_hba *hisi_hba = p;
2402 u32 irq_value, irq_msk, err_value;
2403 struct device *dev = &hisi_hba->pdev->dev;
2404
2405 irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
2406 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
2407
2408 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
2409 if (irq_value) {
2410 if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
2411 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
2412 1 << ENT_INT_SRC3_WP_DEPTH_OFF);
2413 panic("%s: write pointer and depth error (0x%x) \
2414 found!\n",
2415 dev_name(dev), irq_value);
2416 }
2417
2418 if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
2419 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
2420 1 <<
2421 ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
2422 panic("%s: iptt no match slot error (0x%x) found!\n",
2423 dev_name(dev), irq_value);
2424 }
2425
2426 if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF))
2427 panic("%s: read pointer and depth error (0x%x) \
2428 found!\n",
2429 dev_name(dev), irq_value);
2430
2431 if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
2432 int i;
2433
2434 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
2435 1 << ENT_INT_SRC3_AXI_OFF);
2436 err_value = hisi_sas_read32(hisi_hba,
2437 HGC_AXI_FIFO_ERR_INFO);
2438
2439 for (i = 0; i < AXI_ERR_NR; i++) {
2440 if (err_value & BIT(i))
2441 panic("%s: %s (0x%x) found!\n",
2442 dev_name(dev),
2443 axi_err_info[i], irq_value);
2444 }
2445 }
2446
2447 if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
2448 int i;
2449
2450 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
2451 1 << ENT_INT_SRC3_FIFO_OFF);
2452 err_value = hisi_sas_read32(hisi_hba,
2453 HGC_AXI_FIFO_ERR_INFO);
2454
2455 for (i = 0; i < FIFO_ERR_NR; i++) {
2456 if (err_value & BIT(AXI_ERR_NR + i))
2457 panic("%s: %s (0x%x) found!\n",
2458 dev_name(dev),
2459 fifo_err_info[i], irq_value);
2460 }
2461
2462 }
2463
2464 if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
2465 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
2466 1 << ENT_INT_SRC3_LM_OFF);
2467 panic("%s: LM add/fetch list error (0x%x) found!\n",
2468 dev_name(dev), irq_value);
2469 }
2470
2471 if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
2472 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
2473 1 << ENT_INT_SRC3_ABT_OFF);
2474 panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
2475 dev_name(dev), irq_value);
2476 }
2477 }
2478
2479 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
2480
2481 return IRQ_HANDLED;
2482}
2483
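The AXI and FIFO tables above map error bits to names, and fatal_axi_int_v2_hw() walks them bit by bit against the packed error word. A compact sketch of that decode loop (same table contents, simplified reporting instead of panic()):

#include <stdint.h>
#include <stdio.h>

#define AXI_ERR_NR 8
static const char *const axi_err_info[AXI_ERR_NR] = {
	"IOST_AXI_W_ERR", "IOST_AXI_R_ERR", "ITCT_AXI_W_ERR",
	"ITCT_AXI_R_ERR", "SATA_AXI_W_ERR", "SATA_AXI_R_ERR",
	"DQE_AXI_R_ERR",  "CQE_AXI_W_ERR",
};

/* Bit i set in the packed error word means source i fired. */
static void decode_axi_errors(uint32_t err_value)
{
	for (int i = 0; i < AXI_ERR_NR; i++)
		if (err_value & (1u << i))
			printf("%s found\n", axi_err_info[i]);
}

int main(void)
{
	decode_axi_errors(0x41);	/* bits 0 and 6 set */
	return 0;
}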
2040 2484 static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
2041 2485 {
2042 2486 	struct hisi_sas_cq *cq = p;
@@ -2136,6 +2580,16 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
2136 2580 		goto end;
2137 2581 	}
2138 2582
2583 /* check ERR bit of Status Register */
2584 if (fis->status & ATA_ERR) {
2585 dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
2586 fis->status);
2587 disable_phy_v2_hw(hisi_hba, phy_no);
2588 enable_phy_v2_hw(hisi_hba, phy_no);
2589 res = IRQ_NONE;
2590 goto end;
2591 }
2592
2139 2593 	if (unlikely(phy_no == 8)) {
2140 2594 		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
2141 2595
@@ -2190,6 +2644,11 @@ static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
2190 2644 	int_chnl_int_v2_hw,
2191 2645 };
2192 2646
2647static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
2648 fatal_ecc_int_v2_hw,
2649 fatal_axi_int_v2_hw
2650};
2651
2193 2652 /**
2194 2653  * There is a limitation in the hip06 chipset that we need
2195 2654  * to map in all mbigen interrupts, even if they are not used.
@@ -2245,6 +2704,26 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
2245 2704 		}
2246 2705 	}
2247 2706
2707 for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) {
2708 int idx = i;
2709
2710 irq = irq_map[idx + 81];
2711 if (!irq) {
2712 dev_err(dev, "irq init: fail map fatal interrupt %d\n",
2713 idx);
2714 return -ENOENT;
2715 }
2716
2717 rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
2718 DRV_NAME " fatal", hisi_hba);
2719 if (rc) {
2720 dev_err(dev,
2721 "irq init: could not request fatal interrupt %d, rc=%d\n",
2722 irq, rc);
2723 return -ENOENT;
2724 }
2725 }
2726
2248 2727 	for (i = 0; i < hisi_hba->queue_count; i++) {
2249 2728 		int idx = i + 96; /* First cq interrupt is irq96 */
2250 2729
@@ -2303,12 +2782,26 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
2303 2782 	.phy_enable = enable_phy_v2_hw,
2304 2783 	.phy_disable = disable_phy_v2_hw,
2305 2784 	.phy_hard_reset = phy_hard_reset_v2_hw,
2785 .phy_set_linkrate = phy_set_linkrate_v2_hw,
2786 .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
2306 2787 	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
2307 2788 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
2308 2789 };
2309 2790
2310 2791 static int hisi_sas_v2_probe(struct platform_device *pdev)
2311 2792 {
2793 /*
2794 * Check if we should defer the probe before we probe the
2795 * upper layer, as it's hard to defer later on.
2796 */
2797 int ret = platform_get_irq(pdev, 0);
2798
2799 if (ret < 0) {
2800 if (ret != -EPROBE_DEFER)
2801 dev_err(&pdev->dev, "cannot obtain irq\n");
2802 return ret;
2803 }
2804
2312 2805 	return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
2313 2806 }
2314 2807
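The early platform_get_irq() check added to hisi_sas_v2_probe() is the standard probe-deferral idiom: if the interrupt provider has not probed yet, the call returns -EPROBE_DEFER and the driver core retries the probe later instead of failing it outright. A sketch of the same pattern in a generic platform driver (a fragment with illustrative names, not this driver's code):

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0) {
		/* -EPROBE_DEFER is not a failure: the core retries later. */
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "cannot obtain irq\n");
		return irq;
	}

	/* ... normal resource setup would continue here ... */
	return 0;
}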
@@ -2319,6 +2812,7 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
2319 2812
2320 2813 static const struct of_device_id sas_v2_of_match[] = {
2321 2814 	{ .compatible = "hisilicon,hip06-sas-v2",},
2815 { .compatible = "hisilicon,hip07-sas-v2",},
2322 2816 	{},
2323 2817 };
2324 2818 MODULE_DEVICE_TABLE(of, sas_v2_of_match);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a1d6ab76a514..691a09316952 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -276,6 +276,9 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
276static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 276static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
277 unsigned long *memory_bar); 277 unsigned long *memory_bar);
278static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); 278static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
279static int wait_for_device_to_become_ready(struct ctlr_info *h,
280 unsigned char lunaddr[],
281 int reply_queue);
279static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, 282static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
280 int wait_for_ready); 283 int wait_for_ready);
281static inline void finish_cmd(struct CommandList *c); 284static inline void finish_cmd(struct CommandList *c);
@@ -700,9 +703,7 @@ static ssize_t lunid_show(struct device *dev,
700 } 703 }
701 memcpy(lunid, hdev->scsi3addr, sizeof(lunid)); 704 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
702 spin_unlock_irqrestore(&h->lock, flags); 705 spin_unlock_irqrestore(&h->lock, flags);
703 	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
704 		lunid[0], lunid[1], lunid[2], lunid[3],
705 		lunid[4], lunid[5], lunid[6], lunid[7]);
706 	return snprintf(buf, 20, "0x%8phN\n", lunid);
706 707 }
707 708
708 709 static ssize_t unique_id_show(struct device *dev,
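The snprintf() change above is one of many in this series that switch to the kernel's %phN printk extension: "%8phN" prints 8 bytes as contiguous hex digits, replacing a long run of %02x conversions and their argument lists. A userspace approximation of what %8phN emits:

#include <stdio.h>

/* Stand-in for printk's "%*phN": n bytes as contiguous hex, no separators. */
static void print_phN(const unsigned char *buf, int n)
{
	for (int i = 0; i < n; i++)
		printf("%02x", buf[i]);
}

int main(void)
{
	unsigned char lunid[8] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };

	printf("0x");
	print_phN(lunid, 8);	/* prints 0x0001020304050607 */
	printf("\n");
	return 0;
}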
@@ -864,6 +865,16 @@ static ssize_t path_info_show(struct device *dev,
864 return output_len; 865 return output_len;
865} 866}
866 867
868static ssize_t host_show_ctlr_num(struct device *dev,
869 struct device_attribute *attr, char *buf)
870{
871 struct ctlr_info *h;
872 struct Scsi_Host *shost = class_to_shost(dev);
873
874 h = shost_to_hba(shost);
875 return snprintf(buf, 20, "%d\n", h->ctlr);
876}
877
867static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); 878static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
868static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); 879static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
869static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); 880static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
@@ -887,6 +898,8 @@ static DEVICE_ATTR(resettable, S_IRUGO,
887 host_show_resettable, NULL); 898 host_show_resettable, NULL);
888static DEVICE_ATTR(lockup_detected, S_IRUGO, 899static DEVICE_ATTR(lockup_detected, S_IRUGO,
889 host_show_lockup_detected, NULL); 900 host_show_lockup_detected, NULL);
901static DEVICE_ATTR(ctlr_num, S_IRUGO,
902 host_show_ctlr_num, NULL);
890 903
891static struct device_attribute *hpsa_sdev_attrs[] = { 904static struct device_attribute *hpsa_sdev_attrs[] = {
892 &dev_attr_raid_level, 905 &dev_attr_raid_level,
@@ -907,6 +920,7 @@ static struct device_attribute *hpsa_shost_attrs[] = {
907 &dev_attr_hp_ssd_smart_path_status, 920 &dev_attr_hp_ssd_smart_path_status,
908 &dev_attr_raid_offload_debug, 921 &dev_attr_raid_offload_debug,
909 &dev_attr_lockup_detected, 922 &dev_attr_lockup_detected,
923 &dev_attr_ctlr_num,
910 NULL, 924 NULL,
911}; 925};
912 926
@@ -1001,7 +1015,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1001{ 1015{
1002 if (likely(h->transMethod & CFGTBL_Trans_Performant)) { 1016 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1003 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); 1017 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1004 	if (unlikely(!h->msix_vector))
1018 	if (unlikely(!h->msix_vectors))
1005 return; 1019 return;
1006 if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) 1020 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1007 c->Header.ReplyQueue = 1021 c->Header.ReplyQueue =
@@ -2541,7 +2555,7 @@ static void complete_scsi_command(struct CommandList *cp)
2541 2555
2542 if ((unlikely(hpsa_is_pending_event(cp)))) { 2556 if ((unlikely(hpsa_is_pending_event(cp)))) {
2543 if (cp->reset_pending) 2557 if (cp->reset_pending)
2544 		return hpsa_cmd_resolve_and_free(h, cp);
2558 		return hpsa_cmd_free_and_done(h, cp, cmd);
2545 if (cp->abort_pending) 2559 if (cp->abort_pending)
2546 return hpsa_cmd_abort_and_free(h, cp, cmd); 2560 return hpsa_cmd_abort_and_free(h, cp, cmd);
2547 } 2561 }
@@ -2824,14 +2838,8 @@ static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2824 const u8 *cdb = c->Request.CDB; 2838 const u8 *cdb = c->Request.CDB;
2825 const u8 *lun = c->Header.LUN.LunAddrBytes; 2839 const u8 *lun = c->Header.LUN.LunAddrBytes;
2826 2840
2827 	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2828 		" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2829 		txt, lun[0], lun[1], lun[2], lun[3],
2830 		lun[4], lun[5], lun[6], lun[7],
2831 		cdb[0], cdb[1], cdb[2], cdb[3],
2832 		cdb[4], cdb[5], cdb[6], cdb[7],
2833 		cdb[8], cdb[9], cdb[10], cdb[11],
2834 		cdb[12], cdb[13], cdb[14], cdb[15]);
2841 	dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2842 		txt, lun, cdb);
2835} 2843}
2836 2844
2837static void hpsa_scsi_interpret_error(struct ctlr_info *h, 2845static void hpsa_scsi_interpret_error(struct ctlr_info *h,
@@ -3080,6 +3088,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3080 3088
3081 if (unlikely(rc)) 3089 if (unlikely(rc))
3082 atomic_set(&dev->reset_cmds_out, 0); 3090 atomic_set(&dev->reset_cmds_out, 0);
3091 else
3092 wait_for_device_to_become_ready(h, scsi3addr, 0);
3083 3093
3084 mutex_unlock(&h->reset_mutex); 3094 mutex_unlock(&h->reset_mutex);
3085 return rc; 3095 return rc;
@@ -3623,8 +3633,32 @@ out:
3623static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 3633static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3624 struct ReportExtendedLUNdata *buf, int bufsize) 3634 struct ReportExtendedLUNdata *buf, int bufsize)
3625{ 3635{
3626 	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3627 			HPSA_REPORT_PHYS_EXTENDED);
3636 	int rc;
3637 	struct ReportLUNdata *lbuf;
3638
3639 rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3640 HPSA_REPORT_PHYS_EXTENDED);
3641 if (!rc || !hpsa_allow_any)
3642 return rc;
3643
3644 /* REPORT PHYS EXTENDED is not supported */
3645 lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3646 if (!lbuf)
3647 return -ENOMEM;
3648
3649 rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3650 if (!rc) {
3651 int i;
3652 u32 nphys;
3653
3654 /* Copy ReportLUNdata header */
3655 memcpy(buf, lbuf, 8);
3656 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3657 for (i = 0; i < nphys; i++)
3658 memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3659 }
3660 kfree(lbuf);
3661 return rc;
3628 3662 }
3629 3663
3630 3664 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
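The fallback path above parses a plain REPORT LUNS response: the first four bytes of the header hold the list length in bytes, big-endian, and each entry that follows is 8 bytes, so length / 8 gives the entry count. A small sketch of that decode (made-up buffer, standard SCSI header layout):

#include <stdint.h>
#include <stdio.h>

static uint32_t report_luns_count(const uint8_t *hdr)
{
	/* Big-endian 32-bit byte count in hdr[0..3]; 8 bytes per LUN entry. */
	uint32_t len = ((uint32_t)hdr[0] << 24) | ((uint32_t)hdr[1] << 16) |
		       ((uint32_t)hdr[2] << 8)  |  (uint32_t)hdr[3];
	return len / 8;
}

int main(void)
{
	uint8_t hdr[4] = { 0x00, 0x00, 0x00, 0x18 };	/* 24 bytes of entries */

	printf("%u LUNs\n", report_luns_count(hdr));	/* prints 3 LUNs */
	return 0;
}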
@@ -5488,7 +5522,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5488 5522
5489 dev = cmd->device->hostdata; 5523 dev = cmd->device->hostdata;
5490 if (!dev) { 5524 if (!dev) {
5491 		cmd->result = NOT_READY << 16; /* host byte */
5525 		cmd->result = DID_NO_CONNECT << 16;
5492 cmd->scsi_done(cmd); 5526 cmd->scsi_done(cmd);
5493 return 0; 5527 return 0;
5494 } 5528 }
@@ -5569,6 +5603,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
5569 if (unlikely(lockup_detected(h))) 5603 if (unlikely(lockup_detected(h)))
5570 return hpsa_scan_complete(h); 5604 return hpsa_scan_complete(h);
5571 5605
5606 /*
5607 * Do the scan after a reset completion
5608 */
5609 if (h->reset_in_progress) {
5610 h->drv_req_rescan = 1;
5611 return;
5612 }
5613
5572 hpsa_update_scsi_devices(h); 5614 hpsa_update_scsi_devices(h);
5573 5615
5574 hpsa_scan_complete(h); 5616 hpsa_scan_complete(h);
@@ -5624,7 +5666,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5624 sh->sg_tablesize = h->maxsgentries; 5666 sh->sg_tablesize = h->maxsgentries;
5625 sh->transportt = hpsa_sas_transport_template; 5667 sh->transportt = hpsa_sas_transport_template;
5626 sh->hostdata[0] = (unsigned long) h; 5668 sh->hostdata[0] = (unsigned long) h;
5627 	sh->irq = h->intr[h->intr_mode];
5669 	sh->irq = pci_irq_vector(h->pdev, 0);
5628 sh->unique_id = sh->irq; 5670 sh->unique_id = sh->irq;
5629 5671
5630 h->scsi_host = sh; 5672 h->scsi_host = sh;
@@ -5999,11 +6041,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
5999 6041
6000 if (h->raid_offload_debug > 0) 6042 if (h->raid_offload_debug > 0)
6001 dev_info(&h->pdev->dev, 6043 dev_info(&h->pdev->dev,
6002 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 6044 "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
6003 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, 6045 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
6004 "Reset as abort", 6046 "Reset as abort", scsi3addr);
6005 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
6006 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
6007 6047
6008 if (!dev->offload_enabled) { 6048 if (!dev->offload_enabled) {
6009 dev_warn(&h->pdev->dev, 6049 dev_warn(&h->pdev->dev,
@@ -6020,32 +6060,28 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
6020 /* send the reset */ 6060 /* send the reset */
6021 if (h->raid_offload_debug > 0) 6061 if (h->raid_offload_debug > 0)
6022 dev_info(&h->pdev->dev, 6062 dev_info(&h->pdev->dev,
6023 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 6063 "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
6024 psa[0], psa[1], psa[2], psa[3], 6064 psa);
6025 psa[4], psa[5], psa[6], psa[7]);
6026 rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue); 6065 rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
6027 if (rc != 0) { 6066 if (rc != 0) {
6028 dev_warn(&h->pdev->dev, 6067 dev_warn(&h->pdev->dev,
6029 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 6068 "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
6030 psa[0], psa[1], psa[2], psa[3], 6069 psa);
6031 psa[4], psa[5], psa[6], psa[7]);
6032 return rc; /* failed to reset */ 6070 return rc; /* failed to reset */
6033 } 6071 }
6034 6072
6035 /* wait for device to recover */ 6073 /* wait for device to recover */
6036 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) { 6074 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
6037 dev_warn(&h->pdev->dev, 6075 dev_warn(&h->pdev->dev,
6038 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 6076 "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
6039 psa[0], psa[1], psa[2], psa[3], 6077 psa);
6040 psa[4], psa[5], psa[6], psa[7]);
6041 return -1; /* failed to recover */ 6078 return -1; /* failed to recover */
6042 } 6079 }
6043 6080
6044 /* device recovered */ 6081 /* device recovered */
6045 dev_info(&h->pdev->dev, 6082 dev_info(&h->pdev->dev,
6046 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 6083 "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
6047 psa[0], psa[1], psa[2], psa[3], 6084 psa);
6048 psa[4], psa[5], psa[6], psa[7]);
6049 6085
6050 return rc; /* success */ 6086 return rc; /* success */
6051} 6087}
@@ -6663,8 +6699,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6663 return -EINVAL; 6699 return -EINVAL;
6664 if (!capable(CAP_SYS_RAWIO)) 6700 if (!capable(CAP_SYS_RAWIO))
6665 return -EPERM; 6701 return -EPERM;
6666 	ioc = (BIG_IOCTL_Command_struct *)
6667 		kmalloc(sizeof(*ioc), GFP_KERNEL);
6702 	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6668 if (!ioc) { 6703 if (!ioc) {
6669 status = -ENOMEM; 6704 status = -ENOMEM;
6670 goto cleanup1; 6705 goto cleanup1;
@@ -7658,67 +7693,41 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7658 7693
7659 7694 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7660 7695 {
7661 	if (h->msix_vector) {
7662 		if (h->pdev->msix_enabled)
7663 			pci_disable_msix(h->pdev);
7664 		h->msix_vector = 0;
7665 	} else if (h->msi_vector) {
7666 		if (h->pdev->msi_enabled)
7667 			pci_disable_msi(h->pdev);
7668 		h->msi_vector = 0;
7669 	}
7696 	pci_free_irq_vectors(h->pdev);
7697 	h->msix_vectors = 0;
7670 7698 }
7671 7699
7672 7700 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7673 7701  * controllers that are capable. If not, we use legacy INTx mode.
7674 7702  */
7675 static void hpsa_interrupt_mode(struct ctlr_info *h)
7676 {
7677 #ifdef CONFIG_PCI_MSI
7678 	int err, i;
7679 	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
7680
7681 	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
7682 		hpsa_msix_entries[i].vector = 0;
7683 		hpsa_msix_entries[i].entry = i;
7684 	}
7685
7686 	/* Some boards advertise MSI but don't really support it */
7687 	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
7688 		(h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
7689 		goto default_int_mode;
7690 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
7691 		dev_info(&h->pdev->dev, "MSI-X capable controller\n");
7692 		h->msix_vector = MAX_REPLY_QUEUES;
7693 		if (h->msix_vector > num_online_cpus())
7694 			h->msix_vector = num_online_cpus();
7695 		err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
7696 					1, h->msix_vector);
7697 		if (err < 0) {
7698 			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
7699 			h->msix_vector = 0;
7700 			goto single_msi_mode;
7701 		} else if (err < h->msix_vector) {
7702 			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
7703 				"available\n", err);
7704 		}
7705 		h->msix_vector = err;
7706 		for (i = 0; i < h->msix_vector; i++)
7707 			h->intr[i] = hpsa_msix_entries[i].vector;
7708 		return;
7709 	}
7710 single_msi_mode:
7711 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
7712 		dev_info(&h->pdev->dev, "MSI capable controller\n");
7713 		if (!pci_enable_msi(h->pdev))
7714 			h->msi_vector = 1;
7715 		else
7716 			dev_warn(&h->pdev->dev, "MSI init failed\n");
7717 	}
7718 default_int_mode:
7719 #endif /* CONFIG_PCI_MSI */
7720 	/* if we get here we're going to use the default interrupt mode */
7721 	h->intr[h->intr_mode] = h->pdev->irq;
7722 }
7703 static int hpsa_interrupt_mode(struct ctlr_info *h)
7704 {
7705 	unsigned int flags = PCI_IRQ_LEGACY;
7706 	int ret;
7707
7708 	/* Some boards advertise MSI but don't really support it */
7709 	switch (h->board_id) {
7710 	case 0x40700E11:
7711 	case 0x40800E11:
7712 	case 0x40820E11:
7713 	case 0x40830E11:
7714 		break;
7715 	default:
7716 		ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7717 				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7718 		if (ret > 0) {
7719 			h->msix_vectors = ret;
7720 			return 0;
7721 		}
7722
7723 		flags |= PCI_IRQ_MSI;
7724 		break;
7725 	}
7726
7727 	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7728 	if (ret < 0)
7729 		return ret;
7730 	return 0;
7731 }
7723 7732
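This rewrite is the series' core pci_alloc_irq_vectors() pattern: request as many MSI-X vectors as there are reply queues, with PCI_IRQ_AFFINITY spreading them across CPUs, and fall back to a single MSI or legacy INTx vector when that fails. A condensed sketch of the same call sequence (hypothetical driver state, not hpsa's actual structures):

#include <linux/pci.h>

static int demo_alloc_vectors(struct pci_dev *pdev, unsigned int max_queues,
			      unsigned int *nvectors)
{
	int ret = pci_alloc_irq_vectors(pdev, 1, max_queues,
					PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret > 0) {
		*nvectors = ret;	/* between 1 and max_queues vectors */
		return 0;
	}

	/* MSI-X unavailable: settle for one MSI or legacy vector. */
	ret = pci_alloc_irq_vectors(pdev, 1, 1,
				    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (ret < 0)
		return ret;
	*nvectors = 1;
	return 0;
}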
7724static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 7733static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -8074,7 +8083,9 @@ static int hpsa_pci_init(struct ctlr_info *h)
8074 8083
8075 pci_set_master(h->pdev); 8084 pci_set_master(h->pdev);
8076 8085
8077 	hpsa_interrupt_mode(h);
8086 	err = hpsa_interrupt_mode(h);
8087 if (err)
8088 goto clean1;
8078 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 8089 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
8079 if (err) 8090 if (err)
8080 goto clean2; /* intmode+region, pci */ 8091 goto clean2; /* intmode+region, pci */
@@ -8110,6 +8121,7 @@ clean3: /* vaddr, intmode+region, pci */
8110 h->vaddr = NULL; 8121 h->vaddr = NULL;
8111clean2: /* intmode+region, pci */ 8122clean2: /* intmode+region, pci */
8112 hpsa_disable_interrupt_mode(h); 8123 hpsa_disable_interrupt_mode(h);
8124clean1:
8113 /* 8125 /*
8114 * call pci_disable_device before pci_release_regions per 8126 * call pci_disable_device before pci_release_regions per
8115 * Documentation/PCI/pci.txt 8127 * Documentation/PCI/pci.txt
@@ -8243,34 +8255,20 @@ clean_up:
8243 return -ENOMEM; 8255 return -ENOMEM;
8244} 8256}
8245 8257
8246static void hpsa_irq_affinity_hints(struct ctlr_info *h)
8247{
8248 int i, cpu;
8249
8250 cpu = cpumask_first(cpu_online_mask);
8251 for (i = 0; i < h->msix_vector; i++) {
8252 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
8253 cpu = cpumask_next(cpu, cpu_online_mask);
8254 }
8255}
8256
8257 8258 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
8258 static void hpsa_free_irqs(struct ctlr_info *h)
8259 {
8260 	int i;
8261
8262 	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
8263 		/* Single reply queue, only one irq to free */
8264 		i = h->intr_mode;
8265 		irq_set_affinity_hint(h->intr[i], NULL);
8266 		free_irq(h->intr[i], &h->q[i]);
8267 		h->q[i] = 0;
8268 		return;
8269 	}
8270
8271 	for (i = 0; i < h->msix_vector; i++) {
8272 		irq_set_affinity_hint(h->intr[i], NULL);
8273 		free_irq(h->intr[i], &h->q[i]);
8274 		h->q[i] = 0;
8275 	}
8276 	for (; i < MAX_REPLY_QUEUES; i++)
8259 static void hpsa_free_irqs(struct ctlr_info *h)
8260 {
8261 	int i;
8262
8263 	if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
8264 		/* Single reply queue, only one irq to free */
8265 		free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
8266 		h->q[h->intr_mode] = 0;
8267 		return;
8268 	}
8269
8270 	for (i = 0; i < h->msix_vectors; i++) {
8271 		free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
8272 		h->q[i] = 0;
8273 	}
8274 	for (; i < MAX_REPLY_QUEUES; i++)
@@ -8291,11 +8289,11 @@ static int hpsa_request_irqs(struct ctlr_info *h,
8291 8289 	for (i = 0; i < MAX_REPLY_QUEUES; i++)
8292 8290 		h->q[i] = (u8) i;
8293 8291
8294 	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
8292 	if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
8295 8293 		/* If performant mode and MSI-X, use multiple reply queues */
8296 		for (i = 0; i < h->msix_vector; i++) {
8294 		for (i = 0; i < h->msix_vectors; i++) {
8297 8295 			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8298 			rc = request_irq(h->intr[i], msixhandler,
8296 			rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
8299 8297 					0, h->intrname[i],
8300 8298 					&h->q[i]);
8301 8299 			if (rc) {
@@ -8303,9 +8301,9 @@ static int hpsa_request_irqs(struct ctlr_info *h,
8303 8301
8304 8302 				dev_err(&h->pdev->dev,
8305 8303 					"failed to get irq %d for %s\n",
8306 					h->intr[i], h->devname);
8304 					pci_irq_vector(h->pdev, i), h->devname);
8307 8305 				for (j = 0; j < i; j++) {
8308 					free_irq(h->intr[j], &h->q[j]);
8306 					free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8309 8307 					h->q[j] = 0;
8310 8308 				}
8311 8309 				for (; j < MAX_REPLY_QUEUES; j++)
@@ -8313,33 +8311,27 @@ static int hpsa_request_irqs(struct ctlr_info *h,
8313 8311 			return rc;
8314 8312 		}
8315 8313 	}
8316 		hpsa_irq_affinity_hints(h);
8317 8314 	} else {
8318 8315 		/* Use single reply pool */
8319 		if (h->msix_vector > 0 || h->msi_vector) {
8320 			if (h->msix_vector)
8321 				sprintf(h->intrname[h->intr_mode],
8322 					"%s-msix", h->devname);
8323 			else
8324 				sprintf(h->intrname[h->intr_mode],
8325 					"%s-msi", h->devname);
8326 			rc = request_irq(h->intr[h->intr_mode],
8316 		if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8317 			sprintf(h->intrname[0], "%s-msi%s", h->devname,
8318 				h->msix_vectors ? "x" : "");
8319 			rc = request_irq(pci_irq_vector(h->pdev, 0),
8327 8320 				msixhandler, 0,
8328 				h->intrname[h->intr_mode],
8321 				h->intrname[0],
8329 8322 				&h->q[h->intr_mode]);
8330 8323 		} else {
8331 8324 			sprintf(h->intrname[h->intr_mode],
8332 8325 				"%s-intx", h->devname);
8333 			rc = request_irq(h->intr[h->intr_mode],
8326 			rc = request_irq(pci_irq_vector(h->pdev, 0),
8334 8327 				intxhandler, IRQF_SHARED,
8335 			h->intrname[h->intr_mode],
8328 			h->intrname[0],
8336 8329 				&h->q[h->intr_mode]);
8337 8330 		}
8338 		irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
8339 8331 	}
8340 8332 	if (rc) {
8341 8333 		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8342 			h->intr[h->intr_mode], h->devname);
8334 			pci_irq_vector(h->pdev, 0), h->devname);
8343 8335 		hpsa_free_irqs(h);
8344 8336 		return -ENODEV;
8345 8337 	}
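With pci_alloc_irq_vectors() the driver no longer stores per-vector IRQ numbers itself; pci_irq_vector() translates a vector index to a Linux IRQ number on demand, which is what the request_irq()/free_irq() calls above now use. A minimal sketch of that request loop with unwind on failure (illustrative fragment):

#include <linux/interrupt.h>
#include <linux/pci.h>

static int demo_request_irqs(struct pci_dev *pdev, unsigned int nvectors,
			     irq_handler_t handler, void *data)
{
	unsigned int i;
	int rc;

	for (i = 0; i < nvectors; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "demo", data);
		if (rc) {
			/* Undo the vectors already requested. */
			while (i--)
				free_irq(pci_irq_vector(pdev, i), data);
			return rc;
		}
	}
	return 0;
}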
@@ -8640,6 +8632,14 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8640 if (h->remove_in_progress) 8632 if (h->remove_in_progress)
8641 return; 8633 return;
8642 8634
8635 /*
8636 * Do the scan after the reset
8637 */
8638 if (h->reset_in_progress) {
8639 h->drv_req_rescan = 1;
8640 return;
8641 }
8642
8643 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { 8643 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
8644 scsi_host_get(h->scsi_host); 8644 scsi_host_get(h->scsi_host);
8645 hpsa_ack_ctlr_events(h); 8645 hpsa_ack_ctlr_events(h);
@@ -9525,7 +9525,7 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9525 return rc; 9525 return rc;
9526 } 9526 }
9527 9527
9528 	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
9528 	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
9529 hpsa_get_max_perf_mode_cmds(h); 9529 hpsa_get_max_perf_mode_cmds(h);
9530 /* Performant mode ring buffer and supporting data structures */ 9530 /* Performant mode ring buffer and supporting data structures */
9531 h->reply_queue_size = h->max_commands * sizeof(u64); 9531 h->reply_queue_size = h->max_commands * sizeof(u64);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 9ea162de80dc..64e98295b707 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -176,9 +176,7 @@ struct ctlr_info {
176# define DOORBELL_INT 1 176# define DOORBELL_INT 1
177# define SIMPLE_MODE_INT 2 177# define SIMPLE_MODE_INT 2
178# define MEMQ_MODE_INT 3 178# define MEMQ_MODE_INT 3
179 	unsigned int intr[MAX_REPLY_QUEUES];
180 	unsigned int msix_vector;
181 	unsigned int msi_vector;
179 	unsigned int msix_vectors;
182 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ 180 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
183 struct access_method access; 181 struct access_method access;
184 182
@@ -466,7 +464,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
466 unsigned long register_value = FIFO_EMPTY; 464 unsigned long register_value = FIFO_EMPTY;
467 465
468 /* msi auto clears the interrupt pending bit. */ 466 /* msi auto clears the interrupt pending bit. */
469 	if (unlikely(!(h->msi_vector || h->msix_vector))) {
467 	if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
470 /* flush the controller write of the reply queue by reading 468 /* flush the controller write of the reply queue by reading
471 * outbound doorbell status register. 469 * outbound doorbell status register.
472 */ 470 */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 7e487c78279c..78b72c28a55d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -32,6 +32,7 @@
32 32 #include <linux/of.h>
33 33 #include <linux/pm.h>
34 34 #include <linux/stringify.h>
35#include <linux/bsg-lib.h>
35 36 #include <asm/firmware.h>
36 37 #include <asm/irq.h>
37 38 #include <asm/vio.h>
@@ -1701,14 +1702,14 @@ static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1701 1702
1702/** 1703/**
1703 * ibmvfc_bsg_timeout - Handle a BSG timeout 1704 * ibmvfc_bsg_timeout - Handle a BSG timeout
1704  * @job: struct fc_bsg_job that timed out
1705  * @job: struct bsg_job that timed out
1705 * 1706 *
1706 * Returns: 1707 * Returns:
1707 * 0 on success / other on failure 1708 * 0 on success / other on failure
1708 **/ 1709 **/
1709 static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
1710 static int ibmvfc_bsg_timeout(struct bsg_job *job)
1710{ 1711{
1711 	struct ibmvfc_host *vhost = shost_priv(job->shost);
1712 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
1712 unsigned long port_id = (unsigned long)job->dd_data; 1713 unsigned long port_id = (unsigned long)job->dd_data;
1713 struct ibmvfc_event *evt; 1714 struct ibmvfc_event *evt;
1714 struct ibmvfc_tmf *tmf; 1715 struct ibmvfc_tmf *tmf;
@@ -1814,41 +1815,43 @@ unlock_out:
1814 1815
1815/** 1816/**
1816 * ibmvfc_bsg_request - Handle a BSG request 1817 * ibmvfc_bsg_request - Handle a BSG request
1817  * @job: struct fc_bsg_job to be executed
1818  * @job: struct bsg_job to be executed
1818 * 1819 *
1819 * Returns: 1820 * Returns:
1820 * 0 on success / other on failure 1821 * 0 on success / other on failure
1821 **/ 1822 **/
1822 static int ibmvfc_bsg_request(struct fc_bsg_job *job)
1823 static int ibmvfc_bsg_request(struct bsg_job *job)
1823 1824 {
1824 	struct ibmvfc_host *vhost = shost_priv(job->shost);
1825 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
1825 	struct fc_rport *rport = job->rport;
1826 	struct fc_rport *rport = fc_bsg_to_rport(job);
1826 1827 	struct ibmvfc_passthru_mad *mad;
1827 1828 	struct ibmvfc_event *evt;
1828 1829 	union ibmvfc_iu rsp_iu;
1829 1830 	unsigned long flags, port_id = -1;
1830 	unsigned int code = job->request->msgcode;
1831 	struct fc_bsg_request *bsg_request = job->request;
1832 	struct fc_bsg_reply *bsg_reply = job->reply;
1833 	unsigned int code = bsg_request->msgcode;
1831 int rc = 0, req_seg, rsp_seg, issue_login = 0; 1834 int rc = 0, req_seg, rsp_seg, issue_login = 0;
1832 u32 fc_flags, rsp_len; 1835 u32 fc_flags, rsp_len;
1833 1836
1834 ENTER; 1837 ENTER;
1835 	job->reply->reply_payload_rcv_len = 0;
1838 	bsg_reply->reply_payload_rcv_len = 0;
1836 if (rport) 1839 if (rport)
1837 port_id = rport->port_id; 1840 port_id = rport->port_id;
1838 1841
1839 switch (code) { 1842 switch (code) {
1840 case FC_BSG_HST_ELS_NOLOGIN: 1843 case FC_BSG_HST_ELS_NOLOGIN:
1841 		port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
1842 			(job->request->rqst_data.h_els.port_id[1] << 8) |
1843 			job->request->rqst_data.h_els.port_id[2];
1844 		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
1845 			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
1846 			bsg_request->rqst_data.h_els.port_id[2];
1844 case FC_BSG_RPT_ELS: 1847 case FC_BSG_RPT_ELS:
1845 fc_flags = IBMVFC_FC_ELS; 1848 fc_flags = IBMVFC_FC_ELS;
1846 break; 1849 break;
1847 case FC_BSG_HST_CT: 1850 case FC_BSG_HST_CT:
1848 issue_login = 1; 1851 issue_login = 1;
1849 		port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
1850 			(job->request->rqst_data.h_ct.port_id[1] << 8) |
1851 			job->request->rqst_data.h_ct.port_id[2];
1852 		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
1853 			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
1854 			bsg_request->rqst_data.h_ct.port_id[2];
1852 case FC_BSG_RPT_CT: 1855 case FC_BSG_RPT_CT:
1853 fc_flags = IBMVFC_FC_CT_IU; 1856 fc_flags = IBMVFC_FC_CT_IU;
1854 break; 1857 break;
@@ -1937,13 +1940,14 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job)
1937 if (rsp_iu.passthru.common.status) 1940 if (rsp_iu.passthru.common.status)
1938 rc = -EIO; 1941 rc = -EIO;
1939 else 1942 else
1940 		job->reply->reply_payload_rcv_len = rsp_len;
1943 		bsg_reply->reply_payload_rcv_len = rsp_len;
1941 1944
1942 spin_lock_irqsave(vhost->host->host_lock, flags); 1945 spin_lock_irqsave(vhost->host->host_lock, flags);
1943 ibmvfc_free_event(evt); 1946 ibmvfc_free_event(evt);
1944 spin_unlock_irqrestore(vhost->host->host_lock, flags); 1947 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1945 	job->reply->result = rc;
1946 	job->job_done(job);
1948 	bsg_reply->result = rc;
1949 	bsg_job_done(job, bsg_reply->result,
1950 		     bsg_reply->reply_payload_rcv_len);
1947 1951 	rc = 0;
1948 1952 out:
1949 1953 	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
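The ibmvfc changes above track the fc_bsg_job to generic bsg_job conversion: the FC-specific host and rport are recovered through accessors, the request/reply pointers move behind local variables, and completion goes through bsg_job_done() instead of job->job_done(). A skeleton of the converted handler shape (a sketch, not ibmvfc's logic):

#include <linux/bsg-lib.h>
#include <scsi/scsi_transport_fc.h>

static int demo_bsg_request(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct fc_rport *rport = fc_bsg_to_rport(job);
	struct fc_bsg_reply *bsg_reply = job->reply;

	(void)shost;		/* a real handler dispatches on these */
	(void)rport;

	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;
}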
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 642b739ad0da..c9fa3565c671 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -22,7 +22,7 @@
22 * 22 *
23 ****************************************************************************/ 23 ****************************************************************************/
24 24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
@@ -81,7 +81,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
81 } 81 }
82 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 82 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
83 if (se_cmd->data_direction == DMA_TO_DEVICE) { 83 if (se_cmd->data_direction == DMA_TO_DEVICE) {
84 /* residual data from an overflow write */ 84 /* residual data from an overflow write */
85 rsp->flags = SRP_RSP_FLAG_DOOVER; 85 rsp->flags = SRP_RSP_FLAG_DOOVER;
86 rsp->data_out_res_cnt = cpu_to_be32(residual_count); 86 rsp->data_out_res_cnt = cpu_to_be32(residual_count);
87 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 87 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -101,7 +101,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
101 * and the function returns TRUE. 101 * and the function returns TRUE.
102 * 102 *
103 * EXECUTION ENVIRONMENT: 103 * EXECUTION ENVIRONMENT:
104 * Interrupt or Process environment 104 * Interrupt or Process environment
105 */ 105 */
106static bool connection_broken(struct scsi_info *vscsi) 106static bool connection_broken(struct scsi_info *vscsi)
107{ 107{
@@ -324,7 +324,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
324} 324}
325 325
326/** 326/**
327 * ibmvscsis_send_init_message() - send initialize message to the client 327 * ibmvscsis_send_init_message() - send initialize message to the client
328 * @vscsi: Pointer to our adapter structure 328 * @vscsi: Pointer to our adapter structure
329 * @format: Which Init Message format to send 329 * @format: Which Init Message format to send
330 * 330 *
@@ -382,13 +382,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
382 vscsi->cmd_q.base_addr); 382 vscsi->cmd_q.base_addr);
383 if (crq) { 383 if (crq) {
384 *format = (uint)(crq->format); 384 *format = (uint)(crq->format);
385 rc = ERROR; 385 rc = ERROR;
386 crq->valid = INVALIDATE_CMD_RESP_EL; 386 crq->valid = INVALIDATE_CMD_RESP_EL;
387 dma_rmb(); 387 dma_rmb();
388 } 388 }
389 } else { 389 } else {
390 *format = (uint)(crq->format); 390 *format = (uint)(crq->format);
391 rc = ERROR; 391 rc = ERROR;
392 crq->valid = INVALIDATE_CMD_RESP_EL; 392 crq->valid = INVALIDATE_CMD_RESP_EL;
393 dma_rmb(); 393 dma_rmb();
394 } 394 }
@@ -397,166 +397,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
397} 397}
398 398
399/** 399/**
400 * ibmvscsis_establish_new_q() - Establish new CRQ queue
401 * @vscsi: Pointer to our adapter structure
402 * @new_state: New state being established after resetting the queue
403 *
404 * Must be called with interrupt lock held.
405 */
406static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
407{
408 long rc = ADAPT_SUCCESS;
409 uint format;
410
411 vscsi->flags &= PRESERVE_FLAG_FIELDS;
412 vscsi->rsp_q_timer.timer_pops = 0;
413 vscsi->debit = 0;
414 vscsi->credit = 0;
415
416 rc = vio_enable_interrupts(vscsi->dma_dev);
417 if (rc) {
418 pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
419 rc);
420 return rc;
421 }
422
423 rc = ibmvscsis_check_init_msg(vscsi, &format);
424 if (rc) {
425 dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
426 rc);
427 return rc;
428 }
429
430 if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
431 rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
432 switch (rc) {
433 case H_SUCCESS:
434 case H_DROPPED:
435 case H_CLOSED:
436 rc = ADAPT_SUCCESS;
437 break;
438
439 case H_PARAMETER:
440 case H_HARDWARE:
441 break;
442
443 default:
444 vscsi->state = UNDEFINED;
445 rc = H_HARDWARE;
446 break;
447 }
448 }
449
450 return rc;
451}
452
453/**
454 * ibmvscsis_reset_queue() - Reset CRQ Queue
455 * @vscsi: Pointer to our adapter structure
456 * @new_state: New state to establish after resetting the queue
457 *
458 * This function calls h_free_q and then calls h_reg_q and does all
459 * of the bookkeeping to get us back to where we can communicate.
460 *
461 * Actually, we don't always call h_free_crq. A problem was discovered
462 * where one partition would close and reopen his queue, which would
463 * cause his partner to get a transport event, which would cause him to
464 * close and reopen his queue, which would cause the original partition
465 * to get a transport event, etc., etc. To prevent this, we don't
466 * actually close our queue if the client initiated the reset, (i.e.
467 * either we got a transport event or we have detected that the client's
468 * queue is gone)
469 *
470 * EXECUTION ENVIRONMENT:
471 * Process environment, called with interrupt lock held
472 */
473static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
474{
475 int bytes;
476 long rc = ADAPT_SUCCESS;
477
478 pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
479
480 /* don't reset, the client did it for us */
481 if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
482 vscsi->flags &= PRESERVE_FLAG_FIELDS;
483 vscsi->rsp_q_timer.timer_pops = 0;
484 vscsi->debit = 0;
485 vscsi->credit = 0;
486 vscsi->state = new_state;
487 vio_enable_interrupts(vscsi->dma_dev);
488 } else {
489 rc = ibmvscsis_free_command_q(vscsi);
490 if (rc == ADAPT_SUCCESS) {
491 vscsi->state = new_state;
492
493 bytes = vscsi->cmd_q.size * PAGE_SIZE;
494 rc = h_reg_crq(vscsi->dds.unit_id,
495 vscsi->cmd_q.crq_token, bytes);
496 if (rc == H_CLOSED || rc == H_SUCCESS) {
497 rc = ibmvscsis_establish_new_q(vscsi,
498 new_state);
499 }
500
501 if (rc != ADAPT_SUCCESS) {
502 pr_debug("reset_queue: reg_crq rc %ld\n", rc);
503
504 vscsi->state = ERR_DISCONNECTED;
505 vscsi->flags |= RESPONSE_Q_DOWN;
506 ibmvscsis_free_command_q(vscsi);
507 }
508 } else {
509 vscsi->state = ERR_DISCONNECTED;
510 vscsi->flags |= RESPONSE_Q_DOWN;
511 }
512 }
513}
514
515/**
516 * ibmvscsis_free_cmd_resources() - Free command resources
517 * @vscsi: Pointer to our adapter structure
518 * @cmd: Command which is no longer in use
519 *
520 * Must be called with interrupt lock held.
521 */
522static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
523 struct ibmvscsis_cmd *cmd)
524{
525 struct iu_entry *iue = cmd->iue;
526
527 switch (cmd->type) {
528 case TASK_MANAGEMENT:
529 case SCSI_CDB:
530 /*
531 * When the queue goes down this value is cleared, so it
532 * cannot be cleared in this general purpose function.
533 */
534 if (vscsi->debit)
535 vscsi->debit -= 1;
536 break;
537 case ADAPTER_MAD:
538 vscsi->flags &= ~PROCESSING_MAD;
539 break;
540 case UNSET_TYPE:
541 break;
542 default:
543 dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
544 cmd->type);
545 break;
546 }
547
548 cmd->iue = NULL;
549 list_add_tail(&cmd->list, &vscsi->free_cmd);
550 srp_iu_put(iue);
551
552 if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
553 list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
554 vscsi->flags &= ~WAIT_FOR_IDLE;
555 complete(&vscsi->wait_idle);
556 }
557}
558
559/**
560 * ibmvscsis_disconnect() - Helper function to disconnect 400 * ibmvscsis_disconnect() - Helper function to disconnect
561 * @work: Pointer to work_struct, gives access to our adapter structure 401 * @work: Pointer to work_struct, gives access to our adapter structure
562 * 402 *
@@ -575,7 +415,6 @@ static void ibmvscsis_disconnect(struct work_struct *work)
575 proc_work); 415 proc_work);
576 u16 new_state; 416 u16 new_state;
577 bool wait_idle = false; 417 bool wait_idle = false;
578 long rc = ADAPT_SUCCESS;
579 418
580 spin_lock_bh(&vscsi->intr_lock); 419 spin_lock_bh(&vscsi->intr_lock);
581 new_state = vscsi->new_state; 420 new_state = vscsi->new_state;
@@ -589,7 +428,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
589 * should transitition to the new state 428 * should transitition to the new state
590 */ 429 */
591 switch (vscsi->state) { 430 switch (vscsi->state) {
592 /* Should never be called while in this state. */ 431 /* Should never be called while in this state. */
593 case NO_QUEUE: 432 case NO_QUEUE:
594 /* 433 /*
595 * Can never transition from this state; 434 * Can never transition from this state;
@@ -628,30 +467,24 @@ static void ibmvscsis_disconnect(struct work_struct *work)
628 vscsi->state = new_state; 467 vscsi->state = new_state;
629 break; 468 break;
630 469
631 /*
632 * If this is a transition into an error state.
633 * a client is attempting to establish a connection
634 * and has violated the RPA protocol.
635 * There can be nothing pending on the adapter although
636 * there can be requests in the command queue.
637 */
638 case WAIT_ENABLED: 470 case WAIT_ENABLED:
639 case PART_UP_WAIT_ENAB:
640 switch (new_state) { 471 switch (new_state) {
641 case ERR_DISCONNECT: 472 case UNCONFIGURING:
642 vscsi->flags |= RESPONSE_Q_DOWN;
643 vscsi->state = new_state; 473 vscsi->state = new_state;
474 vscsi->flags |= RESPONSE_Q_DOWN;
644 vscsi->flags &= ~(SCHEDULE_DISCONNECT | 475 vscsi->flags &= ~(SCHEDULE_DISCONNECT |
645 DISCONNECT_SCHEDULED); 476 DISCONNECT_SCHEDULED);
646 ibmvscsis_free_command_q(vscsi); 477 dma_rmb();
647 break; 478 if (vscsi->flags & CFG_SLEEPING) {
648 case ERR_DISCONNECT_RECONNECT: 479 vscsi->flags &= ~CFG_SLEEPING;
649 ibmvscsis_reset_queue(vscsi, WAIT_ENABLED); 480 complete(&vscsi->unconfig);
481 }
650 break; 482 break;
651 483
652 /* should never happen */ 484 /* should never happen */
485 case ERR_DISCONNECT:
486 case ERR_DISCONNECT_RECONNECT:
653 case WAIT_IDLE: 487 case WAIT_IDLE:
654 rc = ERROR;
655 dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n", 488 dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
656 vscsi->state); 489 vscsi->state);
657 break; 490 break;
@@ -660,6 +493,13 @@ static void ibmvscsis_disconnect(struct work_struct *work)
660 493
661 case WAIT_IDLE: 494 case WAIT_IDLE:
662 switch (new_state) { 495 switch (new_state) {
496 case UNCONFIGURING:
497 vscsi->flags |= RESPONSE_Q_DOWN;
498 vscsi->state = new_state;
499 vscsi->flags &= ~(SCHEDULE_DISCONNECT |
500 DISCONNECT_SCHEDULED);
501 ibmvscsis_free_command_q(vscsi);
502 break;
663 case ERR_DISCONNECT: 503 case ERR_DISCONNECT:
664 case ERR_DISCONNECT_RECONNECT: 504 case ERR_DISCONNECT_RECONNECT:
665 vscsi->state = new_state; 505 vscsi->state = new_state;
@@ -788,7 +628,6 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
788 break; 628 break;
789 629
790 case WAIT_ENABLED: 630 case WAIT_ENABLED:
791 case PART_UP_WAIT_ENAB:
792 case WAIT_IDLE: 631 case WAIT_IDLE:
793 case WAIT_CONNECTION: 632 case WAIT_CONNECTION:
794 case CONNECTED: 633 case CONNECTED:
@@ -806,6 +645,310 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
806} 645}
807 646
808/** 647/**
648 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
649 * @vscsi: Pointer to our adapter structure
650 *
651 * Must be called with interrupt lock held.
652 */
653static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
654{
655 long rc = ADAPT_SUCCESS;
656
657 switch (vscsi->state) {
658 case NO_QUEUE:
659 case ERR_DISCONNECT:
660 case ERR_DISCONNECT_RECONNECT:
661 case ERR_DISCONNECTED:
662 case UNCONFIGURING:
663 case UNDEFINED:
664 rc = ERROR;
665 break;
666
667 case WAIT_CONNECTION:
668 vscsi->state = CONNECTED;
669 break;
670
671 case WAIT_IDLE:
672 case SRP_PROCESSING:
673 case CONNECTED:
674 case WAIT_ENABLED:
675 default:
676 rc = ERROR;
677 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
678 vscsi->state);
679 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
680 break;
681 }
682
683 return rc;
684}
685
686/**
687 * ibmvscsis_handle_init_msg() - Respond to an Init Message
688 * @vscsi: Pointer to our adapter structure
689 *
690 * Must be called with interrupt lock held.
691 */
692static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
693{
694 long rc = ADAPT_SUCCESS;
695
696 switch (vscsi->state) {
697 case WAIT_CONNECTION:
698 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
699 switch (rc) {
700 case H_SUCCESS:
701 vscsi->state = CONNECTED;
702 break;
703
704 case H_PARAMETER:
705 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
706 rc);
707 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
708 break;
709
710 case H_DROPPED:
711 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
712 rc);
713 rc = ERROR;
714 ibmvscsis_post_disconnect(vscsi,
715 ERR_DISCONNECT_RECONNECT, 0);
716 break;
717
718 case H_CLOSED:
719 pr_warn("init_msg: failed to send, rc %ld\n", rc);
720 rc = 0;
721 break;
722 }
723 break;
724
725 case UNDEFINED:
726 rc = ERROR;
727 break;
728
729 case UNCONFIGURING:
730 break;
731
732 case WAIT_ENABLED:
733 case CONNECTED:
734 case SRP_PROCESSING:
735 case WAIT_IDLE:
736 case NO_QUEUE:
737 case ERR_DISCONNECT:
738 case ERR_DISCONNECT_RECONNECT:
739 case ERR_DISCONNECTED:
740 default:
741 rc = ERROR;
742 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
743 vscsi->state);
744 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
745 break;
746 }
747
748 return rc;
749}
750
751/**
752 * ibmvscsis_init_msg() - Respond to an init message
753 * @vscsi: Pointer to our adapter structure
754 * @crq: Pointer to CRQ element containing the Init Message
755 *
756 * EXECUTION ENVIRONMENT:
757 * Interrupt, interrupt lock held
758 */
759static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
760{
761 long rc = ADAPT_SUCCESS;
762
763 pr_debug("init_msg: state 0x%hx\n", vscsi->state);
764
765 rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
766 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
767 0);
768 if (rc == H_SUCCESS) {
769 vscsi->client_data.partition_number =
770 be64_to_cpu(*(u64 *)vscsi->map_buf);
771 pr_debug("init_msg, part num %d\n",
772 vscsi->client_data.partition_number);
773 } else {
774 pr_debug("init_msg h_vioctl rc %ld\n", rc);
775 rc = ADAPT_SUCCESS;
776 }
777
778 if (crq->format == INIT_MSG) {
779 rc = ibmvscsis_handle_init_msg(vscsi);
780 } else if (crq->format == INIT_COMPLETE_MSG) {
781 rc = ibmvscsis_handle_init_compl_msg(vscsi);
782 } else {
783 rc = ERROR;
784 dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
785 (uint)crq->format);
786 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
787 }
788
789 return rc;
790}
791
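The two handlers above implement the CRQ initialization handshake: a partner in WAIT_CONNECTION that receives INIT_MSG answers with INIT_COMPLETE_MSG and both sides move to CONNECTED; any other state is a protocol error. A toy model of that state transition (greatly simplified, error paths omitted):

#include <stdio.h>

enum state { WAIT_CONNECTION, CONNECTED };
enum msg { INIT_MSG, INIT_COMPLETE_MSG };

/* Returns the next state; *send_complete is set when the caller
 * must answer the partner with INIT_COMPLETE_MSG. */
static enum state handle_init(enum state s, enum msg m, int *send_complete)
{
	*send_complete = 0;
	if (s != WAIT_CONNECTION)
		return s;	/* the real driver posts a disconnect here */
	if (m == INIT_MSG)
		*send_complete = 1;
	return CONNECTED;
}

int main(void)
{
	int reply;
	enum state s = handle_init(WAIT_CONNECTION, INIT_MSG, &reply);

	printf("state=%d reply_with_complete=%d\n", s, reply);
	return 0;
}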
792/**
793 * ibmvscsis_establish_new_q() - Establish new CRQ queue
794 * @vscsi: Pointer to our adapter structure
795 *
796 * Must be called with interrupt lock held.
797 */
798static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
799{
800 long rc = ADAPT_SUCCESS;
801 uint format;
802
803 vscsi->flags &= PRESERVE_FLAG_FIELDS;
804 vscsi->rsp_q_timer.timer_pops = 0;
805 vscsi->debit = 0;
806 vscsi->credit = 0;
807
808 rc = vio_enable_interrupts(vscsi->dma_dev);
809 if (rc) {
810 pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
811 rc);
812 return rc;
813 }
814
815 rc = ibmvscsis_check_init_msg(vscsi, &format);
816 if (rc) {
817 dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
818 rc);
819 return rc;
820 }
821
822 if (format == UNUSED_FORMAT) {
823 rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
824 switch (rc) {
825 case H_SUCCESS:
826 case H_DROPPED:
827 case H_CLOSED:
828 rc = ADAPT_SUCCESS;
829 break;
830
831 case H_PARAMETER:
832 case H_HARDWARE:
833 break;
834
835 default:
836 vscsi->state = UNDEFINED;
837 rc = H_HARDWARE;
838 break;
839 }
840 } else if (format == INIT_MSG) {
841 rc = ibmvscsis_handle_init_msg(vscsi);
842 }
843
844 return rc;
845}
846
847/**
848 * ibmvscsis_reset_queue() - Reset CRQ Queue
849 * @vscsi: Pointer to our adapter structure
850 *
851 * This function calls h_free_q and then calls h_reg_q and does all
852 * of the bookkeeping to get us back to where we can communicate.
853 *
854 * Actually, we don't always call h_free_crq. A problem was discovered
855 * where one partition would close and reopen his queue, which would
856 * cause his partner to get a transport event, which would cause him to
857 * close and reopen his queue, which would cause the original partition
858 * to get a transport event, etc., etc. To prevent this, we don't
859 * actually close our queue if the client initiated the reset, (i.e.
860 * either we got a transport event or we have detected that the client's
861 * queue is gone)
862 *
863 * EXECUTION ENVIRONMENT:
864 * Process environment, called with interrupt lock held
865 */
866static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
867{
868 int bytes;
869 long rc = ADAPT_SUCCESS;
870
871 pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
872
873 /* don't reset, the client did it for us */
874 if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
875 vscsi->flags &= PRESERVE_FLAG_FIELDS;
876 vscsi->rsp_q_timer.timer_pops = 0;
877 vscsi->debit = 0;
878 vscsi->credit = 0;
879 vscsi->state = WAIT_CONNECTION;
880 vio_enable_interrupts(vscsi->dma_dev);
881 } else {
882 rc = ibmvscsis_free_command_q(vscsi);
883 if (rc == ADAPT_SUCCESS) {
884 vscsi->state = WAIT_CONNECTION;
885
886 bytes = vscsi->cmd_q.size * PAGE_SIZE;
887 rc = h_reg_crq(vscsi->dds.unit_id,
888 vscsi->cmd_q.crq_token, bytes);
889 if (rc == H_CLOSED || rc == H_SUCCESS) {
890 rc = ibmvscsis_establish_new_q(vscsi);
891 }
892
893 if (rc != ADAPT_SUCCESS) {
894 pr_debug("reset_queue: reg_crq rc %ld\n", rc);
895
896 vscsi->state = ERR_DISCONNECTED;
897 vscsi->flags |= RESPONSE_Q_DOWN;
898 ibmvscsis_free_command_q(vscsi);
899 }
900 } else {
901 vscsi->state = ERR_DISCONNECTED;
902 vscsi->flags |= RESPONSE_Q_DOWN;
903 }
904 }
905}
906
907/**
908 * ibmvscsis_free_cmd_resources() - Free command resources
909 * @vscsi: Pointer to our adapter structure
910 * @cmd: Command which is no longer in use
911 *
912 * Must be called with interrupt lock held.
913 */
914static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
915 struct ibmvscsis_cmd *cmd)
916{
917 struct iu_entry *iue = cmd->iue;
918
919 switch (cmd->type) {
920 case TASK_MANAGEMENT:
921 case SCSI_CDB:
922 /*
923 * When the queue goes down this value is cleared, so it
924 * cannot be cleared in this general purpose function.
925 */
926 if (vscsi->debit)
927 vscsi->debit -= 1;
928 break;
929 case ADAPTER_MAD:
930 vscsi->flags &= ~PROCESSING_MAD;
931 break;
932 case UNSET_TYPE:
933 break;
934 default:
935 dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
936 cmd->type);
937 break;
938 }
939
940 cmd->iue = NULL;
941 list_add_tail(&cmd->list, &vscsi->free_cmd);
942 srp_iu_put(iue);
943
944 if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
945 list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
946 vscsi->flags &= ~WAIT_FOR_IDLE;
947 complete(&vscsi->wait_idle);
948 }
949}
950
951/**
809 * ibmvscsis_trans_event() - Handle a Transport Event 952 * ibmvscsis_trans_event() - Handle a Transport Event
810 * @vscsi: Pointer to our adapter structure 953 * @vscsi: Pointer to our adapter structure
811 * @crq: Pointer to CRQ entry containing the Transport Event 954 * @crq: Pointer to CRQ entry containing the Transport Event
@@ -863,10 +1006,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
863 TRANS_EVENT)); 1006 TRANS_EVENT));
864 break; 1007 break;
865 1008
866 case PART_UP_WAIT_ENAB:
867 vscsi->state = WAIT_ENABLED;
868 break;
869
870 case SRP_PROCESSING: 1009 case SRP_PROCESSING:
871 if ((vscsi->debit > 0) || 1010 if ((vscsi->debit > 0) ||
872 !list_empty(&vscsi->schedule_q) || 1011 !list_empty(&vscsi->schedule_q) ||
@@ -895,7 +1034,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
895 } 1034 }
896 } 1035 }
897 1036
898 rc = vscsi->flags & SCHEDULE_DISCONNECT; 1037 rc = vscsi->flags & SCHEDULE_DISCONNECT;
899 1038
900 pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n", 1039 pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
901 vscsi->flags, vscsi->state, rc); 1040 vscsi->flags, vscsi->state, rc);
@@ -1066,16 +1205,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1066 free_qs = true; 1205 free_qs = true;
1067 1206
1068 switch (vscsi->state) { 1207 switch (vscsi->state) {
1208 case UNCONFIGURING:
1209 ibmvscsis_free_command_q(vscsi);
1210 dma_rmb();
1211 isync();
1212 if (vscsi->flags & CFG_SLEEPING) {
1213 vscsi->flags &= ~CFG_SLEEPING;
1214 complete(&vscsi->unconfig);
1215 }
1216 break;
1069 case ERR_DISCONNECT_RECONNECT: 1217 case ERR_DISCONNECT_RECONNECT:
1070 ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION); 1218 ibmvscsis_reset_queue(vscsi);
1071 pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags); 1219 pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
1072 break; 1220 break;
1073 1221
1074 case ERR_DISCONNECT: 1222 case ERR_DISCONNECT:
1075 ibmvscsis_free_command_q(vscsi); 1223 ibmvscsis_free_command_q(vscsi);
1076 vscsi->flags &= ~DISCONNECT_SCHEDULED; 1224 vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
1077 vscsi->flags |= RESPONSE_Q_DOWN; 1225 vscsi->flags |= RESPONSE_Q_DOWN;
1078 vscsi->state = ERR_DISCONNECTED; 1226 if (vscsi->tport.enabled)
1227 vscsi->state = ERR_DISCONNECTED;
1228 else
1229 vscsi->state = WAIT_ENABLED;
1079 pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n", 1230 pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
1080 vscsi->flags, vscsi->state); 1231 vscsi->flags, vscsi->state);
1081 break; 1232 break;
@@ -1220,7 +1371,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
1220 * @iue: Information Unit containing the Adapter Info MAD request 1371 * @iue: Information Unit containing the Adapter Info MAD request
1221 * 1372 *
1222 * EXECUTION ENVIRONMENT: 1373 * EXECUTION ENVIRONMENT:
1223 * Interrupt adpater lock is held 1374 * Interrupt adapter lock is held
1224 */ 1375 */
1225static long ibmvscsis_adapter_info(struct scsi_info *vscsi, 1376static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1226 struct iu_entry *iue) 1377 struct iu_entry *iue)
@@ -1620,8 +1771,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1620 be64_to_cpu(msg_hi), 1771 be64_to_cpu(msg_hi),
1621 be64_to_cpu(cmd->rsp.tag)); 1772 be64_to_cpu(cmd->rsp.tag));
1622 1773
1623 pr_debug("send_messages: tag 0x%llx, rc %ld\n", 1774 pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
1624 be64_to_cpu(cmd->rsp.tag), rc); 1775 cmd, be64_to_cpu(cmd->rsp.tag), rc);
1625 1776
1626 /* if all ok free up the command element resources */ 1777 /* if all ok free up the command element resources */
1627 if (rc == H_SUCCESS) { 1778 if (rc == H_SUCCESS) {
@@ -1691,7 +1842,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
1691 * @crq: Pointer to the CRQ entry containing the MAD request 1842 * @crq: Pointer to the CRQ entry containing the MAD request
1692 * 1843 *
1693 * EXECUTION ENVIRONMENT: 1844 * EXECUTION ENVIRONMENT:
1694 * Interrupt called with adapter lock held 1845 * Interrupt, called with adapter lock held
1695 */ 1846 */
1696static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) 1847static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
1697{ 1848{
@@ -1745,14 +1896,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
1745 1896
1746 pr_debug("mad: type %d\n", be32_to_cpu(mad->type)); 1897 pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
1747 1898
1748 if (be16_to_cpu(mad->length) < 0) { 1899 rc = ibmvscsis_process_mad(vscsi, iue);
1749 dev_err(&vscsi->dev, "mad: length is < 0\n");
1750 ibmvscsis_post_disconnect(vscsi,
1751 ERR_DISCONNECT_RECONNECT, 0);
1752 rc = SRP_VIOLATION;
1753 } else {
1754 rc = ibmvscsis_process_mad(vscsi, iue);
1755 }
1756 1900
1757 pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status), 1901 pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
1758 rc); 1902 rc);
@@ -1864,7 +2008,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
1864 break; 2008 break;
1865 case H_PERMISSION: 2009 case H_PERMISSION:
1866 if (connection_broken(vscsi)) 2010 if (connection_broken(vscsi))
1867 flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; 2011 flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
1868 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", 2012 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
1869 rc); 2013 rc);
1870 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 2014 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
@@ -2187,156 +2331,6 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi)
2187} 2331}
2188 2332
2189/** 2333/**
2190 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
2191 * @vscsi: Pointer to our adapter structure
2192 *
2193 * Must be called with interrupt lock held.
2194 */
2195static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
2196{
2197 long rc = ADAPT_SUCCESS;
2198
2199 switch (vscsi->state) {
2200 case NO_QUEUE:
2201 case ERR_DISCONNECT:
2202 case ERR_DISCONNECT_RECONNECT:
2203 case ERR_DISCONNECTED:
2204 case UNCONFIGURING:
2205 case UNDEFINED:
2206 rc = ERROR;
2207 break;
2208
2209 case WAIT_CONNECTION:
2210 vscsi->state = CONNECTED;
2211 break;
2212
2213 case WAIT_IDLE:
2214 case SRP_PROCESSING:
2215 case CONNECTED:
2216 case WAIT_ENABLED:
2217 case PART_UP_WAIT_ENAB:
2218 default:
2219 rc = ERROR;
2220 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
2221 vscsi->state);
2222 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2223 break;
2224 }
2225
2226 return rc;
2227}
2228
2229/**
2230 * ibmvscsis_handle_init_msg() - Respond to an Init Message
2231 * @vscsi: Pointer to our adapter structure
2232 *
2233 * Must be called with interrupt lock held.
2234 */
2235static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
2236{
2237 long rc = ADAPT_SUCCESS;
2238
2239 switch (vscsi->state) {
2240 case WAIT_ENABLED:
2241 vscsi->state = PART_UP_WAIT_ENAB;
2242 break;
2243
2244 case WAIT_CONNECTION:
2245 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
2246 switch (rc) {
2247 case H_SUCCESS:
2248 vscsi->state = CONNECTED;
2249 break;
2250
2251 case H_PARAMETER:
2252 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
2253 rc);
2254 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2255 break;
2256
2257 case H_DROPPED:
2258 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
2259 rc);
2260 rc = ERROR;
2261 ibmvscsis_post_disconnect(vscsi,
2262 ERR_DISCONNECT_RECONNECT, 0);
2263 break;
2264
2265 case H_CLOSED:
2266 pr_warn("init_msg: failed to send, rc %ld\n", rc);
2267 rc = 0;
2268 break;
2269 }
2270 break;
2271
2272 case UNDEFINED:
2273 rc = ERROR;
2274 break;
2275
2276 case UNCONFIGURING:
2277 break;
2278
2279 case PART_UP_WAIT_ENAB:
2280 case CONNECTED:
2281 case SRP_PROCESSING:
2282 case WAIT_IDLE:
2283 case NO_QUEUE:
2284 case ERR_DISCONNECT:
2285 case ERR_DISCONNECT_RECONNECT:
2286 case ERR_DISCONNECTED:
2287 default:
2288 rc = ERROR;
2289 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
2290 vscsi->state);
2291 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2292 break;
2293 }
2294
2295 return rc;
2296}
2297
2298/**
2299 * ibmvscsis_init_msg() - Respond to an init message
2300 * @vscsi: Pointer to our adapter structure
2301 * @crq: Pointer to CRQ element containing the Init Message
2302 *
2303 * EXECUTION ENVIRONMENT:
2304 * Interrupt, interrupt lock held
2305 */
2306static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
2307{
2308 long rc = ADAPT_SUCCESS;
2309
2310 pr_debug("init_msg: state 0x%hx\n", vscsi->state);
2311
2312 rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
2313 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
2314 0);
2315 if (rc == H_SUCCESS) {
2316 vscsi->client_data.partition_number =
2317 be64_to_cpu(*(u64 *)vscsi->map_buf);
2318 pr_debug("init_msg, part num %d\n",
2319 vscsi->client_data.partition_number);
2320 } else {
2321 pr_debug("init_msg h_vioctl rc %ld\n", rc);
2322 rc = ADAPT_SUCCESS;
2323 }
2324
2325 if (crq->format == INIT_MSG) {
2326 rc = ibmvscsis_handle_init_msg(vscsi);
2327 } else if (crq->format == INIT_COMPLETE_MSG) {
2328 rc = ibmvscsis_handle_init_compl_msg(vscsi);
2329 } else {
2330 rc = ERROR;
2331 dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
2332 (uint)crq->format);
2333 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2334 }
2335
2336 return rc;
2337}
2338
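For reference, the handlers deleted above implemented the standard CRQ init handshake: whichever side comes up second sends INIT_MSG, the peer answers INIT_COMPLETE_MSG, and both treat the queue as CONNECTED once the pair has been exchanged. Stripped of its error legs, the removed logic reduces to roughly:

	/* sketch only: the removed handshake, simplified */
	switch (crq->format) {
	case INIT_MSG:		/* peer initialized its queue; ack it */
		if (vscsi->state == WAIT_CONNECTION &&
		    ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG) == H_SUCCESS)
			vscsi->state = CONNECTED;
		break;
	case INIT_COMPLETE_MSG:	/* peer acked the INIT_MSG we sent */
		if (vscsi->state == WAIT_CONNECTION)
			vscsi->state = CONNECTED;
		break;
	}

With PART_UP_WAIT_ENAB gone, the handshake only ever starts from WAIT_CONNECTION, which is why the dedicated handlers could be dropped.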
2339/**
2340 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue. 2334 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
2341 * @vscsi: Pointer to our adapter structure 2335 * @vscsi: Pointer to our adapter structure
2342 * @crq: Pointer to CRQ element containing the SRP request 2336 * @crq: Pointer to CRQ element containing the SRP request
@@ -2391,7 +2385,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi,
2391 break; 2385 break;
2392 2386
2393 case VALID_TRANS_EVENT: 2387 case VALID_TRANS_EVENT:
2394 rc = ibmvscsis_trans_event(vscsi, crq); 2388 rc = ibmvscsis_trans_event(vscsi, crq);
2395 break; 2389 break;
2396 2390
2397 case VALID_INIT_MSG: 2391 case VALID_INIT_MSG:
@@ -2522,7 +2516,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2522 dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n", 2516 dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
2523 srp->tag); 2517 srp->tag);
2524 goto fail; 2518 goto fail;
2525 return;
2526 } 2519 }
2527 2520
2528 cmd->rsp.sol_not = srp->sol_not; 2521 cmd->rsp.sol_not = srp->sol_not;
@@ -2559,6 +2552,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2559 data_len, attr, dir, 0); 2552 data_len, attr, dir, 0);
2560 if (rc) { 2553 if (rc) {
2561 dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc); 2554 dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
2555 spin_lock_bh(&vscsi->intr_lock);
2556 list_del(&cmd->list);
2557 ibmvscsis_free_cmd_resources(vscsi, cmd);
2558 spin_unlock_bh(&vscsi->intr_lock);
2562 goto fail; 2559 goto fail;
2563 } 2560 }
2564 return; 2561 return;
@@ -2638,6 +2635,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
2638 if (rc) { 2635 if (rc) {
2639 dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n", 2636 dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
2640 rc); 2637 rc);
2638 spin_lock_bh(&vscsi->intr_lock);
2639 list_del(&cmd->list);
2640 spin_unlock_bh(&vscsi->intr_lock);
2641 cmd->se_cmd.se_tmr_req->response = 2641 cmd->se_cmd.se_tmr_req->response =
2642 TMR_FUNCTION_REJECTED; 2642 TMR_FUNCTION_REJECTED;
2643 } 2643 }
@@ -2786,36 +2786,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
2786} 2786}
2787 2787
2788/** 2788/**
2789 * ibmvscsis_check_q() - Helper function to Check Init Message Valid
2790 * @vscsi: Pointer to our adapter structure
2791 *
2792 * Checks if an initialize message was queued by the initiator
2793 * while the timing window was open. This function is called from
2794 * probe after the CRQ is created and interrupts are enabled.
2795 * It would only be used by adapters that wait for some event before
2796 * completing the init handshake with the client. For ibmvscsi, this
2797 * event is waiting for the port to be enabled.
2798 *
2799 * EXECUTION ENVIRONMENT:
2800 * Process level only, interrupt lock held
2801 */
2802static long ibmvscsis_check_q(struct scsi_info *vscsi)
2803{
2804 uint format;
2805 long rc;
2806
2807 rc = ibmvscsis_check_init_msg(vscsi, &format);
2808 if (rc)
2809 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2810 else if (format == UNUSED_FORMAT)
2811 vscsi->state = WAIT_ENABLED;
2812 else
2813 vscsi->state = PART_UP_WAIT_ENAB;
2814
2815 return rc;
2816}
2817
2818/**
2819 * ibmvscsis_enable_change_state() - Set new state based on enabled status 2789 * ibmvscsis_enable_change_state() - Set new state based on enabled status
2820 * @vscsi: Pointer to our adapter structure 2790 * @vscsi: Pointer to our adapter structure
2821 * 2791 *
@@ -2826,77 +2796,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi)
2826 */ 2796 */
2827static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) 2797static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
2828{ 2798{
2799 int bytes;
2829 long rc = ADAPT_SUCCESS; 2800 long rc = ADAPT_SUCCESS;
2830 2801
2831handle_state_change: 2802 bytes = vscsi->cmd_q.size * PAGE_SIZE;
2832 switch (vscsi->state) { 2803 rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
2833 case WAIT_ENABLED: 2804 if (rc == H_CLOSED || rc == H_SUCCESS) {
2834 rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); 2805 vscsi->state = WAIT_CONNECTION;
2835 switch (rc) { 2806 rc = ibmvscsis_establish_new_q(vscsi);
2836 case H_SUCCESS: 2807 }
2837 case H_DROPPED:
2838 case H_CLOSED:
2839 vscsi->state = WAIT_CONNECTION;
2840 rc = ADAPT_SUCCESS;
2841 break;
2842
2843 case H_PARAMETER:
2844 break;
2845
2846 case H_HARDWARE:
2847 break;
2848
2849 default:
2850 vscsi->state = UNDEFINED;
2851 rc = H_HARDWARE;
2852 break;
2853 }
2854 break;
2855 case PART_UP_WAIT_ENAB:
2856 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
2857 switch (rc) {
2858 case H_SUCCESS:
2859 vscsi->state = CONNECTED;
2860 rc = ADAPT_SUCCESS;
2861 break;
2862
2863 case H_DROPPED:
2864 case H_CLOSED:
2865 vscsi->state = WAIT_ENABLED;
2866 goto handle_state_change;
2867
2868 case H_PARAMETER:
2869 break;
2870
2871 case H_HARDWARE:
2872 break;
2873
2874 default:
2875 rc = H_HARDWARE;
2876 break;
2877 }
2878 break;
2879
2880 case WAIT_CONNECTION:
2881 case WAIT_IDLE:
2882 case SRP_PROCESSING:
2883 case CONNECTED:
2884 rc = ADAPT_SUCCESS;
2885 break;
2886 /* should not be able to get here */
2887 case UNCONFIGURING:
2888 rc = ERROR;
2889 vscsi->state = UNDEFINED;
2890 break;
2891 2808
2892 /* driver should never allow this to happen */ 2809 if (rc != ADAPT_SUCCESS) {
2893 case ERR_DISCONNECT: 2810 vscsi->state = ERR_DISCONNECTED;
2894 case ERR_DISCONNECT_RECONNECT: 2811 vscsi->flags |= RESPONSE_Q_DOWN;
2895 default:
2896 dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
2897 vscsi->state);
2898 rc = ADAPT_SUCCESS;
2899 break;
2900 } 2812 }
2901 2813
2902 return rc; 2814 return rc;
@@ -2916,7 +2828,6 @@ handle_state_change:
2916 */ 2828 */
2917static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) 2829static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
2918{ 2830{
2919 long rc = 0;
2920 int pages; 2831 int pages;
2921 struct vio_dev *vdev = vscsi->dma_dev; 2832 struct vio_dev *vdev = vscsi->dma_dev;
2922 2833
@@ -2940,22 +2851,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
2940 return -ENOMEM; 2851 return -ENOMEM;
2941 } 2852 }
2942 2853
2943 rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE); 2854 return 0;
2944 if (rc) {
2945 if (rc == H_CLOSED) {
2946 vscsi->state = WAIT_ENABLED;
2947 rc = 0;
2948 } else {
2949 dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
2950 PAGE_SIZE, DMA_BIDIRECTIONAL);
2951 free_page((unsigned long)vscsi->cmd_q.base_addr);
2952 rc = -ENODEV;
2953 }
2954 } else {
2955 vscsi->state = WAIT_ENABLED;
2956 }
2957
2958 return rc;
2959} 2855}
2960 2856
2961/** 2857/**
@@ -3270,7 +3166,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
3270 /* 3166 /*
3271 * if we are in a path where we are waiting for all pending commands 3167 * if we are in a path where we are waiting for all pending commands
3272 * to complete because we received a transport event and anything in 3168 * to complete because we received a transport event and anything in
3273 * the command queue is for a new connection, do nothing 3169 * the command queue is for a new connection, do nothing
3274 */ 3170 */
3275 if (TARGET_STOP(vscsi)) { 3171 if (TARGET_STOP(vscsi)) {
3276 vio_enable_interrupts(vscsi->dma_dev); 3172 vio_enable_interrupts(vscsi->dma_dev);
@@ -3314,7 +3210,7 @@ cmd_work:
3314 * everything but transport events on the queue 3210 * everything but transport events on the queue
3315 * 3211 *
3316 * need to decrement the queue index so we can 3212 * need to decrement the queue index so we can
3317 * look at the elment again 3213 * look at the element again
3318 */ 3214 */
3319 if (vscsi->cmd_q.index) 3215 if (vscsi->cmd_q.index)
3320 vscsi->cmd_q.index -= 1; 3216 vscsi->cmd_q.index -= 1;
@@ -3378,7 +3274,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
3378 INIT_LIST_HEAD(&vscsi->waiting_rsp); 3274 INIT_LIST_HEAD(&vscsi->waiting_rsp);
3379 INIT_LIST_HEAD(&vscsi->active_q); 3275 INIT_LIST_HEAD(&vscsi->active_q);
3380 3276
3381 snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev)); 3277 snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
3278 dev_name(&vdev->dev));
3382 3279
3383 pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name); 3280 pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
3384 3281
@@ -3393,6 +3290,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
3393 strncat(vscsi->eye, vdev->name, MAX_EYE); 3290 strncat(vscsi->eye, vdev->name, MAX_EYE);
3394 3291
3395 vscsi->dds.unit_id = vdev->unit_address; 3292 vscsi->dds.unit_id = vdev->unit_address;
3293 strncpy(vscsi->dds.partition_name, partition_name,
3294 sizeof(vscsi->dds.partition_name));
3295 vscsi->dds.partition_num = partition_number;
3396 3296
3397 spin_lock_bh(&ibmvscsis_dev_lock); 3297 spin_lock_bh(&ibmvscsis_dev_lock);
3398 list_add_tail(&vscsi->list, &ibmvscsis_dev_list); 3298 list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
@@ -3469,6 +3369,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
3469 (unsigned long)vscsi); 3369 (unsigned long)vscsi);
3470 3370
3471 init_completion(&vscsi->wait_idle); 3371 init_completion(&vscsi->wait_idle);
3372 init_completion(&vscsi->unconfig);
3472 3373
3473 snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); 3374 snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
3474 vscsi->work_q = create_workqueue(wq_name); 3375 vscsi->work_q = create_workqueue(wq_name);
@@ -3485,31 +3386,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
3485 goto destroy_WQ; 3386 goto destroy_WQ;
3486 } 3387 }
3487 3388
3488 spin_lock_bh(&vscsi->intr_lock); 3389 vscsi->state = WAIT_ENABLED;
3489 vio_enable_interrupts(vdev);
3490 if (rc) {
3491 dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
3492 rc = -ENODEV;
3493 spin_unlock_bh(&vscsi->intr_lock);
3494 goto free_irq;
3495 }
3496
3497 if (ibmvscsis_check_q(vscsi)) {
3498 rc = ERROR;
3499 dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
3500 spin_unlock_bh(&vscsi->intr_lock);
3501 goto disable_interrupt;
3502 }
3503 spin_unlock_bh(&vscsi->intr_lock);
3504 3390
3505 dev_set_drvdata(&vdev->dev, vscsi); 3391 dev_set_drvdata(&vdev->dev, vscsi);
3506 3392
3507 return 0; 3393 return 0;
3508 3394
3509disable_interrupt:
3510 vio_disable_interrupts(vdev);
3511free_irq:
3512 free_irq(vdev->irq, vscsi);
3513destroy_WQ: 3395destroy_WQ:
3514 destroy_workqueue(vscsi->work_q); 3396 destroy_workqueue(vscsi->work_q);
3515unmap_buf: 3397unmap_buf:
@@ -3543,10 +3425,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
3543 3425
3544 pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev)); 3426 pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
3545 3427
3546 /* 3428 spin_lock_bh(&vscsi->intr_lock);
3547 * TBD: Need to handle if there are commands on the waiting_rsp q 3429 ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
3548 * Actually, can there still be cmds outstanding to tcm? 3430 vscsi->flags |= CFG_SLEEPING;
3549 */ 3431 spin_unlock_bh(&vscsi->intr_lock);
3432 wait_for_completion(&vscsi->unconfig);
3550 3433
3551 vio_disable_interrupts(vdev); 3434 vio_disable_interrupts(vdev);
3552 free_irq(vdev->irq, vscsi); 3435 free_irq(vdev->irq, vscsi);
@@ -3555,7 +3438,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
3555 DMA_BIDIRECTIONAL); 3438 DMA_BIDIRECTIONAL);
3556 kfree(vscsi->map_buf); 3439 kfree(vscsi->map_buf);
3557 tasklet_kill(&vscsi->work_task); 3440 tasklet_kill(&vscsi->work_task);
3558 ibmvscsis_unregister_command_q(vscsi);
3559 ibmvscsis_destroy_command_q(vscsi); 3441 ibmvscsis_destroy_command_q(vscsi);
3560 ibmvscsis_freetimer(vscsi); 3442 ibmvscsis_freetimer(vscsi);
3561 ibmvscsis_free_cmds(vscsi); 3443 ibmvscsis_free_cmds(vscsi);
@@ -3609,7 +3491,7 @@ static int ibmvscsis_get_system_info(void)
3609 3491
3610 num = of_get_property(rootdn, "ibm,partition-no", NULL); 3492 num = of_get_property(rootdn, "ibm,partition-no", NULL);
3611 if (num) 3493 if (num)
3612 partition_number = *num; 3494 partition_number = of_read_number(num, 1);
3613 3495
3614 of_node_put(rootdn); 3496 of_node_put(rootdn);
3615 3497
@@ -3903,18 +3785,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
3903 } 3785 }
3904 3786
3905 if (tmp) { 3787 if (tmp) {
3906 tport->enabled = true;
3907 spin_lock_bh(&vscsi->intr_lock); 3788 spin_lock_bh(&vscsi->intr_lock);
3789 tport->enabled = true;
3908 lrc = ibmvscsis_enable_change_state(vscsi); 3790 lrc = ibmvscsis_enable_change_state(vscsi);
3909 if (lrc) 3791 if (lrc)
3910 pr_err("enable_change_state failed, rc %ld state %d\n", 3792 pr_err("enable_change_state failed, rc %ld state %d\n",
3911 lrc, vscsi->state); 3793 lrc, vscsi->state);
3912 spin_unlock_bh(&vscsi->intr_lock); 3794 spin_unlock_bh(&vscsi->intr_lock);
3913 } else { 3795 } else {
3796 spin_lock_bh(&vscsi->intr_lock);
3914 tport->enabled = false; 3797 tport->enabled = false;
3798 /* This simulates the server going down */
3799 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
3800 spin_unlock_bh(&vscsi->intr_lock);
3915 } 3801 }
3916 3802
3917 pr_debug("tpg_enable_store, state %d\n", vscsi->state); 3803 pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
3918 3804
3919 return count; 3805 return count;
3920} 3806}
@@ -3983,10 +3869,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = {
3983ATTRIBUTE_GROUPS(ibmvscsis_dev); 3869ATTRIBUTE_GROUPS(ibmvscsis_dev);
3984 3870
3985static struct class ibmvscsis_class = { 3871static struct class ibmvscsis_class = {
3986 .name = "ibmvscsis", 3872 .name = "ibmvscsis",
3987 .dev_release = ibmvscsis_dev_release, 3873 .dev_release = ibmvscsis_dev_release,
3988 .class_attrs = ibmvscsis_class_attrs, 3874 .class_attrs = ibmvscsis_class_attrs,
3989 .dev_groups = ibmvscsis_dev_groups, 3875 .dev_groups = ibmvscsis_dev_groups,
3990}; 3876};
3991 3877
3992static struct vio_device_id ibmvscsis_device_table[] = { 3878static struct vio_device_id ibmvscsis_device_table[] = {
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 981a0c992b6c..98b0ca79a5c5 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -204,8 +204,6 @@ struct scsi_info {
204 struct list_head waiting_rsp; 204 struct list_head waiting_rsp;
205#define NO_QUEUE 0x00 205#define NO_QUEUE 0x00
206#define WAIT_ENABLED 0X01 206#define WAIT_ENABLED 0X01
207 /* driver has received an initialize command */
208#define PART_UP_WAIT_ENAB 0x02
209#define WAIT_CONNECTION 0x04 207#define WAIT_CONNECTION 0x04
210 /* have established a connection */ 208 /* have established a connection */
211#define CONNECTED 0x08 209#define CONNECTED 0x08
@@ -259,6 +257,8 @@ struct scsi_info {
259#define SCHEDULE_DISCONNECT 0x00400 257#define SCHEDULE_DISCONNECT 0x00400
260 /* disconnect handler is scheduled */ 258 /* disconnect handler is scheduled */
261#define DISCONNECT_SCHEDULED 0x00800 259#define DISCONNECT_SCHEDULED 0x00800
260 /* remove function is sleeping */
261#define CFG_SLEEPING 0x01000
262 u32 flags; 262 u32 flags;
263 /* adapter lock */ 263 /* adapter lock */
264 spinlock_t intr_lock; 264 spinlock_t intr_lock;
@@ -287,6 +287,7 @@ struct scsi_info {
287 287
288 struct workqueue_struct *work_q; 288 struct workqueue_struct *work_q;
289 struct completion wait_idle; 289 struct completion wait_idle;
290 struct completion unconfig;
290 struct device dev; 291 struct device dev;
291 struct vio_dev *dma_dev; 292 struct vio_dev *dma_dev;
292 struct srp_target target; 293 struct srp_target target;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 532474109624..835c59c777f2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -186,16 +186,16 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
186}; 186};
187 187
188static const struct ipr_chip_t ipr_chip[] = { 188static const struct ipr_chip_t ipr_chip[] = {
189 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, 189 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, 190 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, 191 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, 192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, 193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
194 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, 194 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, 195 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, 196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
197 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, 197 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
198 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } 198 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
199}; 199};
200 200
201static int ipr_max_bus_speeds[] = { 201static int ipr_max_bus_speeds[] = {
@@ -9439,23 +9439,11 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9439static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) 9439static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9440{ 9440{
9441 struct pci_dev *pdev = ioa_cfg->pdev; 9441 struct pci_dev *pdev = ioa_cfg->pdev;
9442 int i;
9442 9443
9443 if (ioa_cfg->intr_flag == IPR_USE_MSI || 9444 for (i = 0; i < ioa_cfg->nvectors; i++)
9444 ioa_cfg->intr_flag == IPR_USE_MSIX) { 9445 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9445 int i; 9446 pci_free_irq_vectors(pdev);
9446 for (i = 0; i < ioa_cfg->nvectors; i++)
9447 free_irq(ioa_cfg->vectors_info[i].vec,
9448 &ioa_cfg->hrrq[i]);
9449 } else
9450 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9451
9452 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9453 pci_disable_msi(pdev);
9454 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9455 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9456 pci_disable_msix(pdev);
9457 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9458 }
9459} 9447}
9460 9448
9461/** 9449/**
@@ -9883,45 +9871,6 @@ static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9883 } 9871 }
9884} 9872}
9885 9873
9886static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9887{
9888 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9889 int i, vectors;
9890
9891 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9892 entries[i].entry = i;
9893
9894 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9895 entries, 1, ipr_number_of_msix);
9896 if (vectors < 0) {
9897 ipr_wait_for_pci_err_recovery(ioa_cfg);
9898 return vectors;
9899 }
9900
9901 for (i = 0; i < vectors; i++)
9902 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9903 ioa_cfg->nvectors = vectors;
9904
9905 return 0;
9906}
9907
9908static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9909{
9910 int i, vectors;
9911
9912 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9913 if (vectors < 0) {
9914 ipr_wait_for_pci_err_recovery(ioa_cfg);
9915 return vectors;
9916 }
9917
9918 for (i = 0; i < vectors; i++)
9919 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9920 ioa_cfg->nvectors = vectors;
9921
9922 return 0;
9923}
9924
9925static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) 9874static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9926{ 9875{
9927 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; 9876 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
@@ -9934,19 +9883,20 @@ static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9934 } 9883 }
9935} 9884}
9936 9885
9937static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg) 9886static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
9887 struct pci_dev *pdev)
9938{ 9888{
9939 int i, rc; 9889 int i, rc;
9940 9890
9941 for (i = 1; i < ioa_cfg->nvectors; i++) { 9891 for (i = 1; i < ioa_cfg->nvectors; i++) {
9942 rc = request_irq(ioa_cfg->vectors_info[i].vec, 9892 rc = request_irq(pci_irq_vector(pdev, i),
9943 ipr_isr_mhrrq, 9893 ipr_isr_mhrrq,
9944 0, 9894 0,
9945 ioa_cfg->vectors_info[i].desc, 9895 ioa_cfg->vectors_info[i].desc,
9946 &ioa_cfg->hrrq[i]); 9896 &ioa_cfg->hrrq[i]);
9947 if (rc) { 9897 if (rc) {
9948 while (--i >= 0) 9898 while (--i >= 0)
9949 free_irq(ioa_cfg->vectors_info[i].vec, 9899 free_irq(pci_irq_vector(pdev, i),
9950 &ioa_cfg->hrrq[i]); 9900 &ioa_cfg->hrrq[i]);
9951 return rc; 9901 return rc;
9952 } 9902 }
@@ -9984,8 +9934,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
9984 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. 9934 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9985 * @pdev: PCI device struct 9935 * @pdev: PCI device struct
9986 * 9936 *
9987 * Description: The return value from pci_enable_msi_range() can not always be 9937 * Description: This routine sets up and initiates a test interrupt to determine
9988 * trusted. This routine sets up and initiates a test interrupt to determine
9989 * if the interrupt is received via the ipr_test_intr() service routine. 9938 * if the interrupt is received via the ipr_test_intr() service routine.
9990 * If the test fails, the driver will fall back to LSI. 9939 * If the test fails, the driver will fall back to LSI.
9991 * 9940 *
@@ -9997,6 +9946,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9997 int rc; 9946 int rc;
9998 volatile u32 int_reg; 9947 volatile u32 int_reg;
9999 unsigned long lock_flags = 0; 9948 unsigned long lock_flags = 0;
9949 int irq = pci_irq_vector(pdev, 0);
10000 9950
10001 ENTER; 9951 ENTER;
10002 9952
@@ -10008,15 +9958,12 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10008 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 9958 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10009 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 9959 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10010 9960
10011 if (ioa_cfg->intr_flag == IPR_USE_MSIX) 9961 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10012 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10013 else
10014 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10015 if (rc) { 9962 if (rc) {
10016 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); 9963 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10017 return rc; 9964 return rc;
10018 } else if (ipr_debug) 9965 } else if (ipr_debug)
10019 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); 9966 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10020 9967
10021 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); 9968 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10022 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 9969 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
@@ -10033,10 +9980,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10033 9980
10034 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 9981 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10035 9982
10036 if (ioa_cfg->intr_flag == IPR_USE_MSIX) 9983 free_irq(irq, ioa_cfg);
10037 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
10038 else
10039 free_irq(pdev->irq, ioa_cfg);
10040 9984
10041 LEAVE; 9985 LEAVE;
10042 9986
@@ -10060,6 +10004,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
10060 int rc = PCIBIOS_SUCCESSFUL; 10004 int rc = PCIBIOS_SUCCESSFUL;
10061 volatile u32 mask, uproc, interrupts; 10005 volatile u32 mask, uproc, interrupts;
10062 unsigned long lock_flags, driver_lock_flags; 10006 unsigned long lock_flags, driver_lock_flags;
10007 unsigned int irq_flag;
10063 10008
10064 ENTER; 10009 ENTER;
10065 10010
@@ -10175,18 +10120,18 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
10175 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS; 10120 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10176 } 10121 }
10177 10122
10178 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && 10123 irq_flag = PCI_IRQ_LEGACY;
10179 ipr_enable_msix(ioa_cfg) == 0) 10124 if (ioa_cfg->ipr_chip->has_msi)
10180 ioa_cfg->intr_flag = IPR_USE_MSIX; 10125 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10181 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && 10126 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10182 ipr_enable_msi(ioa_cfg) == 0) 10127 if (rc < 0) {
10183 ioa_cfg->intr_flag = IPR_USE_MSI; 10128 ipr_wait_for_pci_err_recovery(ioa_cfg);
10184 else { 10129 goto cleanup_nomem;
10185 ioa_cfg->intr_flag = IPR_USE_LSI;
10186 ioa_cfg->clear_isr = 1;
10187 ioa_cfg->nvectors = 1;
10188 dev_info(&pdev->dev, "Cannot enable MSI.\n");
10189 } 10130 }
10131 ioa_cfg->nvectors = rc;
10132
10133 if (!pdev->msi_enabled && !pdev->msix_enabled)
10134 ioa_cfg->clear_isr = 1;
10190 10135
10191 pci_set_master(pdev); 10136 pci_set_master(pdev);
10192 10137
@@ -10199,33 +10144,23 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
10199 } 10144 }
10200 } 10145 }
10201 10146
10202 if (ioa_cfg->intr_flag == IPR_USE_MSI || 10147 if (pdev->msi_enabled || pdev->msix_enabled) {
10203 ioa_cfg->intr_flag == IPR_USE_MSIX) {
10204 rc = ipr_test_msi(ioa_cfg, pdev); 10148 rc = ipr_test_msi(ioa_cfg, pdev);
10205 if (rc == -EOPNOTSUPP) { 10149 switch (rc) {
10150 case 0:
10151 dev_info(&pdev->dev,
10152 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10153 pdev->msix_enabled ? "-X" : "");
10154 break;
10155 case -EOPNOTSUPP:
10206 ipr_wait_for_pci_err_recovery(ioa_cfg); 10156 ipr_wait_for_pci_err_recovery(ioa_cfg);
10207 if (ioa_cfg->intr_flag == IPR_USE_MSI) { 10157 pci_free_irq_vectors(pdev);
10208 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
10209 pci_disable_msi(pdev);
10210 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
10211 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
10212 pci_disable_msix(pdev);
10213 }
10214 10158
10215 ioa_cfg->intr_flag = IPR_USE_LSI;
10216 ioa_cfg->nvectors = 1; 10159 ioa_cfg->nvectors = 1;
10217 } 10160 ioa_cfg->clear_isr = 1;
10218 else if (rc) 10161 break;
10162 default:
10219 goto out_msi_disable; 10163 goto out_msi_disable;
10220 else {
10221 if (ioa_cfg->intr_flag == IPR_USE_MSI)
10222 dev_info(&pdev->dev,
10223 "Request for %d MSIs succeeded with starting IRQ: %d\n",
10224 ioa_cfg->nvectors, pdev->irq);
10225 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10226 dev_info(&pdev->dev,
10227 "Request for %d MSIXs succeeded.",
10228 ioa_cfg->nvectors);
10229 } 10164 }
10230 } 10165 }
10231 10166
@@ -10273,15 +10208,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
10273 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 10208 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10274 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10209 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10275 10210
10276 if (ioa_cfg->intr_flag == IPR_USE_MSI 10211 if (pdev->msi_enabled || pdev->msix_enabled) {
10277 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
10278 name_msi_vectors(ioa_cfg); 10212 name_msi_vectors(ioa_cfg);
10279 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr, 10213 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10280 0,
10281 ioa_cfg->vectors_info[0].desc, 10214 ioa_cfg->vectors_info[0].desc,
10282 &ioa_cfg->hrrq[0]); 10215 &ioa_cfg->hrrq[0]);
10283 if (!rc) 10216 if (!rc)
10284 rc = ipr_request_other_msi_irqs(ioa_cfg); 10217 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10285 } else { 10218 } else {
10286 rc = request_irq(pdev->irq, ipr_isr, 10219 rc = request_irq(pdev->irq, ipr_isr,
10287 IRQF_SHARED, 10220 IRQF_SHARED,
@@ -10323,10 +10256,7 @@ cleanup_nolog:
10323 ipr_free_mem(ioa_cfg); 10256 ipr_free_mem(ioa_cfg);
10324out_msi_disable: 10257out_msi_disable:
10325 ipr_wait_for_pci_err_recovery(ioa_cfg); 10258 ipr_wait_for_pci_err_recovery(ioa_cfg);
10326 if (ioa_cfg->intr_flag == IPR_USE_MSI) 10259 pci_free_irq_vectors(pdev);
10327 pci_disable_msi(pdev);
10328 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10329 pci_disable_msix(pdev);
10330cleanup_nomem: 10260cleanup_nomem:
10331 iounmap(ipr_regs); 10261 iounmap(ipr_regs);
10332out_disable: 10262out_disable:
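The ipr changes above are an instance of the generic pci_alloc_irq_vectors() recipe this series rolls out across drivers: request a range of vectors with the permitted interrupt types, address them through pci_irq_vector(), and tear everything down with pci_free_irq_vectors(). A self-contained sketch of the shape (generic; struct my_data and its fields are placeholders, not ipr code):

	static int setup_irqs(struct pci_dev *pdev, struct my_data *md, int max_vecs)
	{
		int i, rc, nvec;

		nvec = pci_alloc_irq_vectors(pdev, 1, max_vecs,
					     PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (nvec < 0)
			return nvec;		/* no interrupt type could be enabled */

		for (i = 0; i < nvec; i++) {
			rc = request_irq(pci_irq_vector(pdev, i), md->handler,
					 nvec == 1 ? IRQF_SHARED : 0,
					 md->name, &md->queue[i]);
			if (rc)
				goto undo;
		}
		return 0;

	undo:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), &md->queue[i]);
		pci_free_irq_vectors(pdev);
		return rc;
	}

Because pci_irq_vector() works for legacy, MSI and MSI-X alike, the driver no longer needs an intr_flag to remember which mode it ended up in -- which is exactly what the ipr.h hunk below deletes.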
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 8995053d01b3..b7d2e98eb45b 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1413,10 +1413,7 @@ struct ipr_chip_cfg_t {
1413struct ipr_chip_t { 1413struct ipr_chip_t {
1414 u16 vendor; 1414 u16 vendor;
1415 u16 device; 1415 u16 device;
1416 u16 intr_type; 1416 bool has_msi;
1417#define IPR_USE_LSI 0x00
1418#define IPR_USE_MSI 0x01
1419#define IPR_USE_MSIX 0x02
1420 u16 sis_type; 1417 u16 sis_type;
1421#define IPR_SIS32 0x00 1418#define IPR_SIS32 0x00
1422#define IPR_SIS64 0x01 1419#define IPR_SIS64 0x01
@@ -1593,11 +1590,9 @@ struct ipr_ioa_cfg {
1593 struct ipr_cmnd **ipr_cmnd_list; 1590 struct ipr_cmnd **ipr_cmnd_list;
1594 dma_addr_t *ipr_cmnd_list_dma; 1591 dma_addr_t *ipr_cmnd_list_dma;
1595 1592
1596 u16 intr_flag;
1597 unsigned int nvectors; 1593 unsigned int nvectors;
1598 1594
1599 struct { 1595 struct {
1600 unsigned short vec;
1601 char desc[22]; 1596 char desc[22];
1602 } vectors_info[IPR_MAX_MSIX_VECTORS]; 1597 } vectors_info[IPR_MAX_MSIX_VECTORS];
1603 1598
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 02cb76fd4420..3419e1bcdff6 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -2241,9 +2241,6 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
2241 uint8_t minor; 2241 uint8_t minor;
2242 uint8_t subminor; 2242 uint8_t subminor;
2243 uint8_t *buffer; 2243 uint8_t *buffer;
2244 char hexDigits[] =
2245 { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
2246 'D', 'E', 'F' };
2247 2244
2248 METHOD_TRACE("ips_get_bios_version", 1); 2245 METHOD_TRACE("ips_get_bios_version", 1);
2249 2246
@@ -2374,13 +2371,13 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
2374 } 2371 }
2375 } 2372 }
2376 2373
2377 ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4]; 2374 ha->bios_version[0] = hex_asc_upper_hi(major);
2378 ha->bios_version[1] = '.'; 2375 ha->bios_version[1] = '.';
2379 ha->bios_version[2] = hexDigits[major & 0x0F]; 2376 ha->bios_version[2] = hex_asc_upper_lo(major);
2380 ha->bios_version[3] = hexDigits[subminor]; 2377 ha->bios_version[3] = hex_asc_upper_lo(subminor);
2381 ha->bios_version[4] = '.'; 2378 ha->bios_version[4] = '.';
2382 ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4]; 2379 ha->bios_version[5] = hex_asc_upper_hi(minor);
2383 ha->bios_version[6] = hexDigits[minor & 0x0F]; 2380 ha->bios_version[6] = hex_asc_upper_lo(minor);
2384 ha->bios_version[7] = 0; 2381 ha->bios_version[7] = 0;
2385} 2382}
2386 2383
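hex_asc_upper_hi()/hex_asc_upper_lo() are the kernel's stock helpers for exactly the table the hunk deletes: they index the shared hex_asc_upper[] digit string by the high or low nibble of a byte. Their definitions (paraphrased from include/linux/kernel.h):

	extern const char hex_asc_upper[];	/* "0123456789ABCDEF" */
	#define hex_asc_upper_lo(x)	hex_asc_upper[((x) & 0x0f)]
	#define hex_asc_upper_hi(x)	hex_asc_upper[((x) & 0xf0) >> 4]

	/* e.g. major = 0x3A: hex_asc_upper_hi -> '3', hex_asc_upper_lo -> 'A' */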
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 22a9bb1abae1..b3539928073c 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -295,7 +295,6 @@ enum sci_controller_states {
295#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS) 295#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
296 296
297struct isci_pci_info { 297struct isci_pci_info {
298 struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
299 struct isci_host *hosts[SCI_MAX_CONTROLLERS]; 298 struct isci_host *hosts[SCI_MAX_CONTROLLERS];
300 struct isci_orom *orom; 299 struct isci_orom *orom;
301}; 300};
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 77128d680e3b..0b5b5db0d0f8 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -350,16 +350,12 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
350 */ 350 */
351 num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT; 351 num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
352 352
353 for (i = 0; i < num_msix; i++) 353 err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX);
354 pci_info->msix_entries[i].entry = i; 354 if (err < 0)
355
356 err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix);
357 if (err)
358 goto intx; 355 goto intx;
359 356
360 for (i = 0; i < num_msix; i++) { 357 for (i = 0; i < num_msix; i++) {
361 int id = i / SCI_NUM_MSI_X_INT; 358 int id = i / SCI_NUM_MSI_X_INT;
362 struct msix_entry *msix = &pci_info->msix_entries[i];
363 irq_handler_t isr; 359 irq_handler_t isr;
364 360
365 ihost = pci_info->hosts[id]; 361 ihost = pci_info->hosts[id];
@@ -369,8 +365,8 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
369 else 365 else
370 isr = isci_msix_isr; 366 isr = isci_msix_isr;
371 367
372 err = devm_request_irq(&pdev->dev, msix->vector, isr, 0, 368 err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
373 DRV_NAME"-msix", ihost); 369 isr, 0, DRV_NAME"-msix", ihost);
374 if (!err) 370 if (!err)
375 continue; 371 continue;
376 372
@@ -378,18 +374,19 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
378 while (i--) { 374 while (i--) {
379 id = i / SCI_NUM_MSI_X_INT; 375 id = i / SCI_NUM_MSI_X_INT;
380 ihost = pci_info->hosts[id]; 376 ihost = pci_info->hosts[id];
381 msix = &pci_info->msix_entries[i]; 377 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
382 devm_free_irq(&pdev->dev, msix->vector, ihost); 378 ihost);
383 } 379 }
384 pci_disable_msix(pdev); 380 pci_free_irq_vectors(pdev);
385 goto intx; 381 goto intx;
386 } 382 }
387 return 0; 383 return 0;
388 384
389 intx: 385 intx:
390 for_each_isci_host(i, ihost, pdev) { 386 for_each_isci_host(i, ihost, pdev) {
391 err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr, 387 err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
392 IRQF_SHARED, DRV_NAME"-intx", ihost); 388 isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx",
389 ihost);
393 if (err) 390 if (err)
394 break; 391 break;
395 } 392 }
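isci needs exactly SCI_NUM_MSI_X_INT vectors per controller, so min and max are passed as the same value; if that cannot be met it falls back to one shared INTx line, where pci_irq_vector(pdev, 0) simply returns pdev->irq. The fallback shape, reduced to a sketch:

	/* exact MSI-X count or nothing */
	err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX);
	if (err < 0) {
		/* legacy path: vector 0 aliases pdev->irq */
		err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
				       isci_intx_isr, IRQF_SHARED, DRV_NAME, ihost);
	}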
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 8ac646e5eddc..a2bbe46f8ccb 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -54,6 +54,7 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
54 len = pci_biosrom_size(pdev); 54 len = pci_biosrom_size(pdev);
55 rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL); 55 rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
56 if (!rom) { 56 if (!rom) {
57 pci_unmap_biosrom(oprom);
57 dev_warn(&pdev->dev, 58 dev_warn(&pdev->dev,
58 "Unable to allocate memory for orom\n"); 59 "Unable to allocate memory for orom\n");
59 return NULL; 60 return NULL;
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100638a2..e3f2a5359d71 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
66{ 66{
67 static const char * const strings[] = RNC_STATES; 67 static const char * const strings[] = RNC_STATES;
68 68
69 if (state >= ARRAY_SIZE(strings))
70 return "UNKNOWN";
71
69 return strings[state]; 72 return strings[state];
70} 73}
71#undef C 74#undef C
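The added guard is the usual hardening for enum-to-name tables: an out-of-range state now yields "UNKNOWN" instead of an out-of-bounds read. Generic form of the pattern (illustrative names, not the driver's):

	static const char * const names[] = { "INITIAL", "READY", "INVALIDATING" };

	static const char *state_name(unsigned int state)
	{
		if (state >= ARRAY_SIZE(names))
			return "UNKNOWN";	/* never index past the table */
		return names[state];
	}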
@@ -454,7 +457,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
454 * the device since it's being invalidated anyway */ 457 * the device since it's being invalidated anyway */
455 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 458 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
456 "%s: SCIC Remote Node Context 0x%p was " 459 "%s: SCIC Remote Node Context 0x%p was "
457 "suspeneded by hardware while being " 460 "suspended by hardware while being "
458 "invalidated.\n", __func__, sci_rnc); 461 "invalidated.\n", __func__, sci_rnc);
459 break; 462 break;
460 default: 463 default:
@@ -473,7 +476,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
473 * the device since it's being resumed anyway */ 476 * the device since it's being resumed anyway */
474 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 477 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
475 "%s: SCIC Remote Node Context 0x%p was " 478 "%s: SCIC Remote Node Context 0x%p was "
476 "suspeneded by hardware while being resumed.\n", 479 "suspended by hardware while being resumed.\n",
477 __func__, sci_rnc); 480 __func__, sci_rnc);
478 break; 481 break;
479 default: 482 default:
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index b709d2b20880..47f66e949745 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2473,7 +2473,7 @@ static void isci_request_process_response_iu(
2473 "%s: resp_iu = %p " 2473 "%s: resp_iu = %p "
2474 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " 2474 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2475 "resp_iu->response_data_len = %x, " 2475 "resp_iu->response_data_len = %x, "
2476 "resp_iu->sense_data_len = %x\nrepsonse data: ", 2476 "resp_iu->sense_data_len = %x\nresponse data: ",
2477 __func__, 2477 __func__,
2478 resp_iu, 2478 resp_iu,
2479 resp_iu->status, 2479 resp_iu->status,
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 880a9068ca12..6103231104da 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -68,10 +68,14 @@ static void fc_disc_stop_rports(struct fc_disc *disc)
68 68
69 lport = fc_disc_lport(disc); 69 lport = fc_disc_lport(disc);
70 70
71 mutex_lock(&disc->disc_mutex); 71 rcu_read_lock();
72 list_for_each_entry_rcu(rdata, &disc->rports, peers) 72 list_for_each_entry_rcu(rdata, &disc->rports, peers) {
73 lport->tt.rport_logoff(rdata); 73 if (kref_get_unless_zero(&rdata->kref)) {
74 mutex_unlock(&disc->disc_mutex); 74 fc_rport_logoff(rdata);
75 kref_put(&rdata->kref, fc_rport_destroy);
76 }
77 }
78 rcu_read_unlock();
75} 79}
76 80
77/** 81/**
@@ -150,7 +154,7 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
150 break; 154 break;
151 } 155 }
152 } 156 }
153 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); 157 fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
154 158
155 /* 159 /*
156 * If not doing a complete rediscovery, do GPN_ID on 160 * If not doing a complete rediscovery, do GPN_ID on
@@ -178,7 +182,7 @@ reject:
178 FC_DISC_DBG(disc, "Received a bad RSCN frame\n"); 182 FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
179 rjt_data.reason = ELS_RJT_LOGIC; 183 rjt_data.reason = ELS_RJT_LOGIC;
180 rjt_data.explan = ELS_EXPL_NONE; 184 rjt_data.explan = ELS_EXPL_NONE;
181 lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); 185 fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
182 fc_frame_free(fp); 186 fc_frame_free(fp);
183} 187}
184 188
@@ -289,15 +293,19 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
289 * Skip ports which were never discovered. These are the dNS port 293 * Skip ports which were never discovered. These are the dNS port
290 * and ports which were created by PLOGI. 294 * and ports which were created by PLOGI.
291 */ 295 */
296 rcu_read_lock();
292 list_for_each_entry_rcu(rdata, &disc->rports, peers) { 297 list_for_each_entry_rcu(rdata, &disc->rports, peers) {
293 if (!rdata->disc_id) 298 if (!kref_get_unless_zero(&rdata->kref))
294 continue; 299 continue;
295 if (rdata->disc_id == disc->disc_id) 300 if (rdata->disc_id) {
296 lport->tt.rport_login(rdata); 301 if (rdata->disc_id == disc->disc_id)
297 else 302 fc_rport_login(rdata);
298 lport->tt.rport_logoff(rdata); 303 else
304 fc_rport_logoff(rdata);
305 }
306 kref_put(&rdata->kref, fc_rport_destroy);
299 } 307 }
300 308 rcu_read_unlock();
301 mutex_unlock(&disc->disc_mutex); 309 mutex_unlock(&disc->disc_mutex);
302 disc->disc_callback(lport, event); 310 disc->disc_callback(lport, event);
303 mutex_lock(&disc->disc_mutex); 311 mutex_lock(&disc->disc_mutex);
@@ -446,7 +454,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
446 454
447 if (ids.port_id != lport->port_id && 455 if (ids.port_id != lport->port_id &&
448 ids.port_name != lport->wwpn) { 456 ids.port_name != lport->wwpn) {
449 rdata = lport->tt.rport_create(lport, ids.port_id); 457 rdata = fc_rport_create(lport, ids.port_id);
450 if (rdata) { 458 if (rdata) {
451 rdata->ids.port_name = ids.port_name; 459 rdata->ids.port_name = ids.port_name;
452 rdata->disc_id = disc->disc_id; 460 rdata->disc_id = disc->disc_id;
@@ -592,7 +600,6 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
592 lport = rdata->local_port; 600 lport = rdata->local_port;
593 disc = &lport->disc; 601 disc = &lport->disc;
594 602
595 mutex_lock(&disc->disc_mutex);
596 if (PTR_ERR(fp) == -FC_EX_CLOSED) 603 if (PTR_ERR(fp) == -FC_EX_CLOSED)
597 goto out; 604 goto out;
598 if (IS_ERR(fp)) 605 if (IS_ERR(fp))
@@ -607,37 +614,41 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
607 goto redisc; 614 goto redisc;
608 pn = (struct fc_ns_gid_pn *)(cp + 1); 615 pn = (struct fc_ns_gid_pn *)(cp + 1);
609 port_name = get_unaligned_be64(&pn->fn_wwpn); 616 port_name = get_unaligned_be64(&pn->fn_wwpn);
617 mutex_lock(&rdata->rp_mutex);
610 if (rdata->ids.port_name == -1) 618 if (rdata->ids.port_name == -1)
611 rdata->ids.port_name = port_name; 619 rdata->ids.port_name = port_name;
612 else if (rdata->ids.port_name != port_name) { 620 else if (rdata->ids.port_name != port_name) {
613 FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. " 621 FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
614 "Port-id %6.6x wwpn %16.16llx\n", 622 "Port-id %6.6x wwpn %16.16llx\n",
615 rdata->ids.port_id, port_name); 623 rdata->ids.port_id, port_name);
616 lport->tt.rport_logoff(rdata); 624 mutex_unlock(&rdata->rp_mutex);
617 625 fc_rport_logoff(rdata);
618 new_rdata = lport->tt.rport_create(lport, 626 mutex_lock(&lport->disc.disc_mutex);
619 rdata->ids.port_id); 627 new_rdata = fc_rport_create(lport, rdata->ids.port_id);
628 mutex_unlock(&lport->disc.disc_mutex);
620 if (new_rdata) { 629 if (new_rdata) {
621 new_rdata->disc_id = disc->disc_id; 630 new_rdata->disc_id = disc->disc_id;
622 lport->tt.rport_login(new_rdata); 631 fc_rport_login(new_rdata);
623 } 632 }
624 goto out; 633 goto out;
625 } 634 }
626 rdata->disc_id = disc->disc_id; 635 rdata->disc_id = disc->disc_id;
627 lport->tt.rport_login(rdata); 636 mutex_unlock(&rdata->rp_mutex);
637 fc_rport_login(rdata);
628 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { 638 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
629 FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n", 639 FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
630 cp->ct_reason, cp->ct_explan); 640 cp->ct_reason, cp->ct_explan);
631 lport->tt.rport_logoff(rdata); 641 fc_rport_logoff(rdata);
632 } else { 642 } else {
633 FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n", 643 FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
634 ntohs(cp->ct_cmd)); 644 ntohs(cp->ct_cmd));
635redisc: 645redisc:
646 mutex_lock(&disc->disc_mutex);
636 fc_disc_restart(disc); 647 fc_disc_restart(disc);
648 mutex_unlock(&disc->disc_mutex);
637 } 649 }
638out: 650out:
639 mutex_unlock(&disc->disc_mutex); 651 kref_put(&rdata->kref, fc_rport_destroy);
640 kref_put(&rdata->kref, lport->tt.rport_destroy);
641} 652}
642 653
643/** 654/**
@@ -678,7 +689,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
678{ 689{
679 struct fc_rport_priv *rdata; 690 struct fc_rport_priv *rdata;
680 691
681 rdata = lport->tt.rport_create(lport, dp->port_id); 692 rdata = fc_rport_create(lport, dp->port_id);
682 if (!rdata) 693 if (!rdata)
683 return -ENOMEM; 694 return -ENOMEM;
684 rdata->disc_id = 0; 695 rdata->disc_id = 0;
@@ -708,7 +719,7 @@ static void fc_disc_stop(struct fc_lport *lport)
708static void fc_disc_stop_final(struct fc_lport *lport) 719static void fc_disc_stop_final(struct fc_lport *lport)
709{ 720{
710 fc_disc_stop(lport); 721 fc_disc_stop(lport);
711 lport->tt.rport_flush_queue(); 722 fc_rport_flush_queue();
712} 723}
713 724
714/** 725/**
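A recurring move in the fc_disc hunks above: the disc_mutex around rport list walks is replaced by RCU traversal plus per-object references. kref_get_unless_zero() fails once the refcount has already hit zero, so entries that are mid-teardown are simply skipped; everything else stays pinned for the duration of the operation. The core of the pattern, as used above:

	rcu_read_lock();
	list_for_each_entry_rcu(rdata, &disc->rports, peers) {
		if (!kref_get_unless_zero(&rdata->kref))
			continue;		/* already being destroyed, skip */
		fc_rport_logoff(rdata);		/* safe: we hold a reference */
		kref_put(&rdata->kref, fc_rport_destroy);
	}
	rcu_read_unlock();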
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index c2384d501470..6384a98048af 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -67,7 +67,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
67 fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type, 67 fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
68 FC_FCTL_REQ, 0); 68 FC_FCTL_REQ, 0);
69 69
70 return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); 70 return fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
71} 71}
72EXPORT_SYMBOL(fc_elsct_send); 72EXPORT_SYMBOL(fc_elsct_send);
73 73
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 16ca31ad5ec0..42bcf7f3a0f9 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -94,6 +94,7 @@ struct fc_exch_pool {
94struct fc_exch_mgr { 94struct fc_exch_mgr {
95 struct fc_exch_pool __percpu *pool; 95 struct fc_exch_pool __percpu *pool;
96 mempool_t *ep_pool; 96 mempool_t *ep_pool;
97 struct fc_lport *lport;
97 enum fc_class class; 98 enum fc_class class;
98 struct kref kref; 99 struct kref kref;
99 u16 min_xid; 100 u16 min_xid;
@@ -362,8 +363,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
362 363
363 fc_exch_hold(ep); /* hold for timer */ 364 fc_exch_hold(ep); /* hold for timer */
364 if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, 365 if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
365 msecs_to_jiffies(timer_msec))) 366 msecs_to_jiffies(timer_msec))) {
367 FC_EXCH_DBG(ep, "Exchange already queued\n");
366 fc_exch_release(ep); 368 fc_exch_release(ep);
369 }
367} 370}
368 371
369/** 372/**
@@ -406,6 +409,8 @@ static int fc_exch_done_locked(struct fc_exch *ep)
406 return rc; 409 return rc;
407} 410}
408 411
412static struct fc_exch fc_quarantine_exch;
413
409/** 414/**
410 * fc_exch_ptr_get() - Return an exchange from an exchange pool 415 * fc_exch_ptr_get() - Return an exchange from an exchange pool
411 * @pool: Exchange Pool to get an exchange from 416 * @pool: Exchange Pool to get an exchange from
@@ -450,14 +455,17 @@ static void fc_exch_delete(struct fc_exch *ep)
 
 	/* update cache of free slot */
 	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
-	if (pool->left == FC_XID_UNKNOWN)
-		pool->left = index;
-	else if (pool->right == FC_XID_UNKNOWN)
-		pool->right = index;
-	else
-		pool->next_index = index;
-
-	fc_exch_ptr_set(pool, index, NULL);
+	if (!(ep->state & FC_EX_QUARANTINE)) {
+		if (pool->left == FC_XID_UNKNOWN)
+			pool->left = index;
+		else if (pool->right == FC_XID_UNKNOWN)
+			pool->right = index;
+		else
+			pool->next_index = index;
+		fc_exch_ptr_set(pool, index, NULL);
+	} else {
+		fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
+	}
 	list_del(&ep->ex_list);
 	spin_unlock_bh(&pool->lock);
 	fc_exch_release(ep);	/* drop hold for exch in mp */
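The hunk above is one half of the new quarantine mechanism: when FC_EX_QUARANTINE is set on an exchange, fc_exch_delete() parks the pool slot on the static fc_quarantine_exch sentinel instead of clearing it, so the xid can be neither reallocated (the WARN_ON checks added to fc_exch_em_alloc() below) nor looked up again (the sentinel test added to fc_exch_find()). A condensed sketch of the three slot states, simplified from this file's hunks:

/* Sketch only: distinguishing the pool slot states introduced here. */
static const char *example_slot_state(struct fc_exch_pool *pool, u16 index)
{
	struct fc_exch *ep = fc_exch_ptr_get(pool, index);

	if (!ep)
		return "free";		/* may be handed out again */
	if (ep == &fc_quarantine_exch)
		return "quarantined";	/* neither allocatable nor findable */
	return "in use";		/* live exchange */
}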
@@ -525,8 +533,7 @@ out:
  * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
  * or indirectly by calling libfc_function_template.frame_send().
  */
-static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
-		       struct fc_frame *fp)
+int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
 {
 	struct fc_exch *ep;
 	int error;
@@ -536,6 +543,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
 	spin_unlock_bh(&ep->ex_lock);
 	return error;
 }
+EXPORT_SYMBOL(fc_seq_send);
 
 /**
  * fc_seq_alloc() - Allocate a sequence for a given exchange
@@ -577,7 +585,7 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
  * for a given sequence/exchange pair
  * @sp: The sequence/exchange to get a new exchange for
  */
-static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
 {
 	struct fc_exch *ep = fc_seq_exch(sp);
 
@@ -587,16 +595,16 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
 
 	return sp;
 }
+EXPORT_SYMBOL(fc_seq_start_next);
 
 /*
  * Set the response handler for the exchange associated with a sequence.
  *
  * Note: May sleep if invoked from outside a response handler.
  */
-static void fc_seq_set_resp(struct fc_seq *sp,
-			    void (*resp)(struct fc_seq *, struct fc_frame *,
-					 void *),
-			    void *arg)
+void fc_seq_set_resp(struct fc_seq *sp,
+		     void (*resp)(struct fc_seq *, struct fc_frame *, void *),
+		     void *arg)
 {
 	struct fc_exch *ep = fc_seq_exch(sp);
 	DEFINE_WAIT(wait);
@@ -615,12 +623,20 @@ static void fc_seq_set_resp(struct fc_seq *sp,
 	ep->arg = arg;
 	spin_unlock_bh(&ep->ex_lock);
 }
+EXPORT_SYMBOL(fc_seq_set_resp);
 
 /**
  * fc_exch_abort_locked() - Abort an exchange
  * @ep: The exchange to be aborted
  * @timer_msec: The period of time to wait before aborting
  *
+ * Abort an exchange and sequence. Generally called because of an
+ * exchange timeout or an abort from the upper layer.
+ *
+ * A timer_msec can be specified for the abort timeout. If a non-zero
+ * timer_msec value is specified, the exchange resp handler will be
+ * called with a timeout error if no response to the abort is received.
+ *
  * Locking notes:  Called with exch lock held
  *
  * Return value: 0 on success else error code
@@ -632,9 +648,13 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
 	struct fc_frame *fp;
 	int error;
 
+	FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
 	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
-	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
+	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
+		FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
+			    ep->esb_stat, ep->state);
 		return -ENXIO;
+	}
 
 	/*
 	 * Send the abort on a new sequence if possible.
@@ -680,8 +700,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
  *
  * Return value: 0 on success else error code
  */
-static int fc_seq_exch_abort(const struct fc_seq *req_sp,
-			     unsigned int timer_msec)
+int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
 {
 	struct fc_exch *ep;
 	int error;
@@ -758,7 +777,7 @@ static void fc_exch_timeout(struct work_struct *work)
 	u32 e_stat;
 	int rc = 1;
 
-	FC_EXCH_DBG(ep, "Exchange timed out\n");
+	FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
 
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
@@ -821,14 +840,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 
 	/* peek cache of free slot */
 	if (pool->left != FC_XID_UNKNOWN) {
-		index = pool->left;
-		pool->left = FC_XID_UNKNOWN;
-		goto hit;
+		if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
+			index = pool->left;
+			pool->left = FC_XID_UNKNOWN;
+			goto hit;
+		}
 	}
 	if (pool->right != FC_XID_UNKNOWN) {
-		index = pool->right;
-		pool->right = FC_XID_UNKNOWN;
-		goto hit;
+		if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
+			index = pool->right;
+			pool->right = FC_XID_UNKNOWN;
+			goto hit;
+		}
 	}
 
 	index = pool->next_index;
@@ -888,14 +911,19 @@ err:
  * EM is selected when a NULL match function pointer is encountered
  * or when a call to a match function returns true.
  */
-static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
-					    struct fc_frame *fp)
+static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+				     struct fc_frame *fp)
 {
 	struct fc_exch_mgr_anchor *ema;
+	struct fc_exch *ep;
 
-	list_for_each_entry(ema, &lport->ema_list, ema_list)
-		if (!ema->match || ema->match(fp))
-			return fc_exch_em_alloc(lport, ema->mp);
+	list_for_each_entry(ema, &lport->ema_list, ema_list) {
+		if (!ema->match || ema->match(fp)) {
+			ep = fc_exch_em_alloc(lport, ema->mp);
+			if (ep)
+				return ep;
+		}
+	}
 	return NULL;
 }
 
@@ -906,14 +934,17 @@ static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
  */
 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 {
+	struct fc_lport *lport = mp->lport;
 	struct fc_exch_pool *pool;
 	struct fc_exch *ep = NULL;
 	u16 cpu = xid & fc_cpu_mask;
 
+	if (xid == FC_XID_UNKNOWN)
+		return NULL;
+
 	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
-		printk_ratelimited(KERN_ERR
-			"libfc: lookup request for XID = %d, "
-			"indicates invalid CPU %d\n", xid, cpu);
+		pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n",
+		       lport->host->host_no, lport->port_id, xid, cpu);
 		return NULL;
 	}
 
@@ -921,6 +952,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 	pool = per_cpu_ptr(mp->pool, cpu);
 	spin_lock_bh(&pool->lock);
 	ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
+	if (ep == &fc_quarantine_exch) {
+		FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
+		ep = NULL;
+	}
 	if (ep) {
 		WARN_ON(ep->xid != xid);
 		fc_exch_hold(ep);
@@ -938,7 +973,7 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
  *
  * Note: May sleep if invoked from outside a response handler.
  */
-static void fc_exch_done(struct fc_seq *sp)
+void fc_exch_done(struct fc_seq *sp)
 {
 	struct fc_exch *ep = fc_seq_exch(sp);
 	int rc;
@@ -951,6 +986,7 @@ static void fc_exch_done(struct fc_seq *sp)
 	if (!rc)
 		fc_exch_delete(ep);
 }
+EXPORT_SYMBOL(fc_exch_done);
 
 /**
  * fc_exch_resp() - Allocate a new exchange for a response frame
@@ -1197,8 +1233,8 @@ static void fc_exch_set_addr(struct fc_exch *ep,
  *
  * The received frame is not freed.
  */
-static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
-				struct fc_seq_els_data *els_data)
+void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
+			 struct fc_seq_els_data *els_data)
 {
 	switch (els_cmd) {
 	case ELS_LS_RJT:
@@ -1217,6 +1253,7 @@ static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
 		FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
 	}
 }
+EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
 
 /**
  * fc_seq_send_last() - Send a sequence that is the last in the exchange
@@ -1258,8 +1295,10 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
 	 */
 	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
 		fp = fc_frame_alloc(lport, 0);
-		if (!fp)
+		if (!fp) {
+			FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
 			return;
+		}
 
 		fh = fc_frame_header_get(fp);
 		fh->fh_r_ctl = FC_RCTL_ACK_1;
@@ -1312,13 +1351,18 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
 	struct fc_frame_header *rx_fh;
 	struct fc_frame_header *fh;
 	struct fc_ba_rjt *rp;
+	struct fc_seq *sp;
 	struct fc_lport *lport;
 	unsigned int f_ctl;
 
 	lport = fr_dev(rx_fp);
+	sp = fr_seq(rx_fp);
 	fp = fc_frame_alloc(lport, sizeof(*rp));
-	if (!fp)
+	if (!fp) {
+		FC_EXCH_DBG(fc_seq_exch(sp),
+			    "Drop BA_RJT request, out of memory\n");
 		return;
+	}
 	fh = fc_frame_header_get(fp);
 	rx_fh = fc_frame_header_get(rx_fp);
 
@@ -1383,14 +1427,17 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
 	if (!ep)
 		goto reject;
 
+	FC_EXCH_DBG(ep, "exch: ABTS received\n");
 	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
-	if (!fp)
+	if (!fp) {
+		FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
 		goto free;
+	}
 
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->esb_stat & ESB_ST_COMPLETE) {
 		spin_unlock_bh(&ep->ex_lock);
-
+		FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
 		fc_frame_free(fp);
 		goto reject;
 	}
@@ -1433,7 +1480,7 @@ reject:
  * A reference will be held on the exchange/sequence for the caller, which
  * must call fc_seq_release().
  */
-static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
+struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
 {
 	struct fc_exch_mgr_anchor *ema;
 
@@ -1447,15 +1494,17 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
 			break;
 	return fr_seq(fp);
 }
+EXPORT_SYMBOL(fc_seq_assign);
 
 /**
  * fc_seq_release() - Release the hold
  * @sp: The sequence.
  */
-static void fc_seq_release(struct fc_seq *sp)
+void fc_seq_release(struct fc_seq *sp)
 {
 	fc_exch_release(fc_seq_exch(sp));
 }
+EXPORT_SYMBOL(fc_seq_release);
 
 /**
  * fc_exch_recv_req() - Handler for an incoming request
@@ -1491,7 +1540,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
 	 * The upper-level protocol may request one later, if needed.
 	 */
 	if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
-		return lport->tt.lport_recv(lport, fp);
+		return fc_lport_recv(lport, fp);
 
 	reject = fc_seq_lookup_recip(lport, mp, fp);
 	if (reject == FC_RJT_NONE) {
@@ -1512,7 +1561,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
 		 * first.
 		 */
 		if (!fc_invoke_resp(ep, sp, fp))
-			fc_lport_recv(lport, fp);
 		fc_exch_release(ep);	/* release from lookup */
 	} else {
 		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
@@ -1562,9 +1611,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 	if (fc_sof_is_init(sof)) {
 		sp->ssb_stat |= SSB_ST_RESP;
 		sp->id = fh->fh_seq_id;
-	} else if (sp->id != fh->fh_seq_id) {
-		atomic_inc(&mp->stats.seq_not_found);
-		goto rel;
 	}
 
 	f_ctl = ntoh24(fh->fh_f_ctl);
@@ -1761,7 +1807,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
 		fc_frame_free(fp);
 		break;
 	case FC_RCTL_BA_ABTS:
-		fc_exch_recv_abts(ep, fp);
+		if (ep)
+			fc_exch_recv_abts(ep, fp);
+		else
+			fc_frame_free(fp);
 		break;
 	default:	/* ignore junk */
 		fc_frame_free(fp);
@@ -1784,11 +1833,16 @@ static void fc_seq_ls_acc(struct fc_frame *rx_fp)
 	struct fc_lport *lport;
 	struct fc_els_ls_acc *acc;
 	struct fc_frame *fp;
+	struct fc_seq *sp;
 
 	lport = fr_dev(rx_fp);
+	sp = fr_seq(rx_fp);
 	fp = fc_frame_alloc(lport, sizeof(*acc));
-	if (!fp)
+	if (!fp) {
+		FC_EXCH_DBG(fc_seq_exch(sp),
+			    "exch: drop LS_ACC, out of memory\n");
 		return;
+	}
 	acc = fc_frame_payload_get(fp, sizeof(*acc));
 	memset(acc, 0, sizeof(*acc));
 	acc->la_cmd = ELS_LS_ACC;
@@ -1811,11 +1865,16 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
 	struct fc_lport *lport;
 	struct fc_els_ls_rjt *rjt;
 	struct fc_frame *fp;
+	struct fc_seq *sp;
 
 	lport = fr_dev(rx_fp);
+	sp = fr_seq(rx_fp);
 	fp = fc_frame_alloc(lport, sizeof(*rjt));
-	if (!fp)
+	if (!fp) {
+		FC_EXCH_DBG(fc_seq_exch(sp),
+			    "exch: drop LS_RJT, out of memory\n");
 		return;
+	}
 	rjt = fc_frame_payload_get(fp, sizeof(*rjt));
 	memset(rjt, 0, sizeof(*rjt));
 	rjt->er_cmd = ELS_LS_RJT;
@@ -1960,8 +2019,7 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
 	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
 	enum fc_els_rjt_explan explan;
 	u32 sid;
-	u16 rxid;
-	u16 oxid;
+	u16 xid, rxid, oxid;
 
 	lport = fr_dev(rfp);
 	rp = fc_frame_payload_get(rfp, sizeof(*rp));
@@ -1972,18 +2030,35 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
 	rxid = ntohs(rp->rec_rx_id);
 	oxid = ntohs(rp->rec_ox_id);
 
-	ep = fc_exch_lookup(lport,
-			    sid == fc_host_port_id(lport->host) ? oxid : rxid);
 	explan = ELS_EXPL_OXID_RXID;
-	if (!ep)
+	if (sid == fc_host_port_id(lport->host))
+		xid = oxid;
+	else
+		xid = rxid;
+	if (xid == FC_XID_UNKNOWN) {
+		FC_LPORT_DBG(lport,
+			     "REC request from %x: invalid rxid %x oxid %x\n",
+			     sid, rxid, oxid);
+		goto reject;
+	}
+	ep = fc_exch_lookup(lport, xid);
+	if (!ep) {
+		FC_LPORT_DBG(lport,
+			     "REC request from %x: rxid %x oxid %x not found\n",
+			     sid, rxid, oxid);
 		goto reject;
+	}
+	FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
+		    sid, rxid, oxid);
 	if (ep->oid != sid || oxid != ep->oxid)
 		goto rel;
 	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
 		goto rel;
 	fp = fc_frame_alloc(lport, sizeof(*acc));
-	if (!fp)
+	if (!fp) {
+		FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
 		goto out;
+	}
 
 	acc = fc_frame_payload_get(fp, sizeof(*acc));
 	memset(acc, 0, sizeof(*acc));
@@ -2065,6 +2140,24 @@ cleanup:
  * @arg:	The argument to be passed to the response handler
  * @timer_msec: The timeout period for the exchange
  *
+ * The exchange response handler is set in this routine to the resp()
+ * function pointer. It can be called in two scenarios: if a timeout
+ * occurs or if a response frame is received for the exchange. The
+ * fc_frame pointer in the response handler will also indicate a
+ * timeout as an error using the IS_ERR related macros.
+ *
+ * The exchange destructor handler is also set in this routine.
+ * The destructor handler is invoked by the EM layer when the exchange
+ * is about to be freed; the caller can use it to free its own
+ * resources along with the exchange.
+ *
+ * The arg is passed back to the resp and destructor handlers.
+ *
+ * The timeout value (in msec) for an exchange is set if a non-zero
+ * timer_msec argument is specified. The timer is canceled when
+ * it fires or when the exchange is done. The exchange timeout handler
+ * is registered by the EM layer.
+ *
  * The frame pointer with some of the header's fields must be
  * filled before calling this routine, those fields are:
  *
@@ -2075,14 +2168,13 @@ cleanup:
  * - frame control
  * - parameter or relative offset
  */
-static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
-				       struct fc_frame *fp,
-				       void (*resp)(struct fc_seq *,
-						    struct fc_frame *fp,
-						    void *arg),
-				       void (*destructor)(struct fc_seq *,
-							  void *),
-				       void *arg, u32 timer_msec)
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+				struct fc_frame *fp,
+				void (*resp)(struct fc_seq *,
+					     struct fc_frame *fp,
+					     void *arg),
+				void (*destructor)(struct fc_seq *, void *),
+				void *arg, u32 timer_msec)
 {
 	struct fc_exch *ep;
 	struct fc_seq *sp = NULL;
@@ -2101,7 +2193,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
 	ep->resp = resp;
 	ep->destructor = destructor;
 	ep->arg = arg;
-	ep->r_a_tov = FC_DEF_R_A_TOV;
+	ep->r_a_tov = lport->r_a_tov;
 	ep->lp = lport;
 	sp = &ep->seq;
 
@@ -2135,6 +2227,7 @@ err:
 	fc_exch_delete(ep);
 	return NULL;
 }
+EXPORT_SYMBOL(fc_exch_seq_send);
 
 /**
  * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
@@ -2176,6 +2269,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
 		return;
 
 retry:
+	FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
 		spin_unlock_bh(&ep->ex_lock);
@@ -2218,6 +2312,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp)
 	if (!ep)
 		goto reject;
 	spin_lock_bh(&ep->ex_lock);
+	FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
+		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
 	if (ep->oxid != ntohs(rp->rrq_ox_id))
 		goto unlock_reject;
 	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
@@ -2385,6 +2481,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
 		return NULL;
 
 	mp->class = class;
+	mp->lport = lport;
 	/* adjust em exch xid range for offload */
 	mp->min_xid = min_xid;
 
@@ -2558,36 +2655,9 @@ EXPORT_SYMBOL(fc_exch_recv);
  */
 int fc_exch_init(struct fc_lport *lport)
 {
-	if (!lport->tt.seq_start_next)
-		lport->tt.seq_start_next = fc_seq_start_next;
-
-	if (!lport->tt.seq_set_resp)
-		lport->tt.seq_set_resp = fc_seq_set_resp;
-
-	if (!lport->tt.exch_seq_send)
-		lport->tt.exch_seq_send = fc_exch_seq_send;
-
-	if (!lport->tt.seq_send)
-		lport->tt.seq_send = fc_seq_send;
-
-	if (!lport->tt.seq_els_rsp_send)
-		lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
-
-	if (!lport->tt.exch_done)
-		lport->tt.exch_done = fc_exch_done;
-
 	if (!lport->tt.exch_mgr_reset)
 		lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
 
-	if (!lport->tt.seq_exch_abort)
-		lport->tt.seq_exch_abort = fc_seq_exch_abort;
-
-	if (!lport->tt.seq_assign)
-		lport->tt.seq_assign = fc_seq_assign;
-
-	if (!lport->tt.seq_release)
-		lport->tt.seq_release = fc_seq_release;
-
 	return 0;
 }
 EXPORT_SYMBOL(fc_exch_init);
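With fc_exch_init() slimmed down, only exch_mgr_reset remains a template operation; the rest of the sequence/exchange API is reached through the EXPORT_SYMBOL'd functions above. A hedged sketch of the originator-side flow under the new convention (the function name, xid range, and timeout are illustrative assumptions, not values from this patch):

/* Sketch: set up an exchange manager and send a request exchange. */
static int example_setup_and_send(struct fc_lport *lport, struct fc_frame *fp,
				  void (*resp)(struct fc_seq *,
					       struct fc_frame *, void *),
				  void *arg)
{
	fc_exch_init(lport);	/* now only defaults tt.exch_mgr_reset */
	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, 0x0001, 0x0fff, NULL))
		return -ENOMEM;	/* xid range 0x0001-0x0fff is illustrative */
	if (!fc_exch_seq_send(lport, fp, resp, NULL, arg, 2 * lport->r_a_tov))
		return -ENOMEM;	/* no free exchange */
	return 0;
}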
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5121272f28fd..0e67621477a8 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -122,6 +122,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
 #define FC_HRD_ERROR		9
 #define FC_CRC_ERROR		10
 #define FC_TIMED_OUT		11
+#define FC_TRANS_RESET		12
 
 /*
  * Error recovery timeout values.
@@ -195,7 +196,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
  * @seq: The sequence that the FCP packet is on (required by destructor API)
  * @fsp: The FCP packet to be released
  *
- * This routine is called by a destructor callback in the exch_seq_send()
+ * This routine is called by a destructor callback in the fc_exch_seq_send()
  * routine of the libfc Transport Template. The 'struct fc_seq' is a required
  * argument even though it is not used by this routine.
  *
@@ -253,8 +254,21 @@ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
  */
 static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
 {
-	if (!(fsp->state & FC_SRB_COMPL))
+	if (!(fsp->state & FC_SRB_COMPL)) {
 		mod_timer(&fsp->timer, jiffies + delay);
+		fsp->timer_delay = delay;
+	}
+}
+
+static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp)
+{
+	fsp->state |= FC_SRB_ABORTED;
+	fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+	if (fsp->wait_for_comp)
+		complete(&fsp->tm_done);
+	else
+		fc_fcp_complete_locked(fsp);
 }
 
 /**
@@ -264,6 +278,8 @@ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
  */
 static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
 {
+	int rc;
+
 	if (!fsp->seq_ptr)
 		return -EINVAL;
 
@@ -271,7 +287,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
 	put_cpu();
 
 	fsp->state |= FC_SRB_ABORT_PENDING;
-	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
+	rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
+	/*
+	 * fc_seq_exch_abort() might return -ENXIO if
+	 * the sequence is already completed
+	 */
+	if (rc == -ENXIO) {
+		fc_fcp_abort_done(fsp);
+		rc = 0;
+	}
+	return rc;
 }
 
 /**
@@ -283,16 +308,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
  * fc_io_compl() will notify the SCSI-ml that the I/O is done.
  * The SCSI-ml will retry the command.
  */
-static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code)
 {
 	if (fsp->seq_ptr) {
-		fsp->lp->tt.exch_done(fsp->seq_ptr);
+		fc_exch_done(fsp->seq_ptr);
 		fsp->seq_ptr = NULL;
 	}
 
 	fsp->state &= ~FC_SRB_ABORT_PENDING;
 	fsp->io_status = 0;
-	fsp->status_code = FC_ERROR;
+	fsp->status_code = status_code;
 	fc_fcp_complete_locked(fsp);
 }
 
@@ -402,8 +427,6 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
 	if (!can_queue)
 		can_queue = 1;
 	lport->host->can_queue = can_queue;
-	shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
-		     "Reducing can_queue to %d.\n", can_queue);
 
 unlock:
 	spin_unlock_irqrestore(lport->host->host_lock, flags);
@@ -430,10 +453,29 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
 	put_cpu();
 	/* error case */
 	fc_fcp_can_queue_ramp_down(lport);
+	shost_printk(KERN_ERR, lport->host,
+		     "libfc: Could not allocate frame, "
+		     "reducing can_queue to %d.\n", lport->host->can_queue);
 	return NULL;
 }
 
 /**
+ * get_fsp_rec_tov() - Helper function to get REC_TOV
+ * @fsp: the FCP packet
+ *
+ * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
+ */
+static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
+{
+	struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
+	unsigned int e_d_tov = FC_DEF_E_D_TOV;
+
+	if (rpriv && rpriv->e_d_tov > e_d_tov)
+		e_d_tov = rpriv->e_d_tov;
+	return msecs_to_jiffies(e_d_tov) + HZ;
+}
+
+/**
  * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
  * @fsp: The FCP packet the data is on
  * @fp: The data frame
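The relocated get_fsp_rec_tov() also hardens the version removed later in this file: it tolerates a NULL rpriv and never goes below FC_DEF_E_D_TOV, where the removed helper dereferenced rpriv unconditionally. Worked through under the assumption that FC_DEF_E_D_TOV is the libfc default of 2000 msec (stated here, not shown in the hunk):

/* Worked example: default e_d_tov of 2000 msec gives 2 s + 1 s = 3 s. */
static unsigned int example_default_rec_tov(void)
{
	return msecs_to_jiffies(2000) + HZ;	/* == 3 * HZ */
}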
@@ -536,8 +578,10 @@ crc_err:
 	 * and completes the transfer, call the completion handler.
 	 */
 	if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
-	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid) {
+		FC_FCP_DBG(fsp, "complete out-of-order sequence\n");
 		fc_fcp_complete_locked(fsp);
+	}
 	return;
 err:
 	fc_fcp_recovery(fsp, host_bcode);
@@ -609,7 +653,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 	remaining = seq_blen;
 	fh_parm_offset = frame_offset = offset;
 	tlen = 0;
-	seq = lport->tt.seq_start_next(seq);
+	seq = fc_seq_start_next(seq);
 	f_ctl = FC_FC_REL_OFF;
 	WARN_ON(!seq);
 
@@ -687,7 +731,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 	/*
 	 * send fragment using for a sequence.
 	 */
-	error = lport->tt.seq_send(lport, seq, fp);
+	error = fc_seq_send(lport, seq, fp);
 	if (error) {
 		WARN_ON(1);		/* send error should be rare */
 		return error;
@@ -727,15 +771,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		ba_done = 0;
 	}
 
-	if (ba_done) {
-		fsp->state |= FC_SRB_ABORTED;
-		fsp->state &= ~FC_SRB_ABORT_PENDING;
-
-		if (fsp->wait_for_comp)
-			complete(&fsp->tm_done);
-		else
-			fc_fcp_complete_locked(fsp);
-	}
+	if (ba_done)
+		fc_fcp_abort_done(fsp);
 }
 
 /**
@@ -764,8 +801,11 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	fh = fc_frame_header_get(fp);
 	r_ctl = fh->fh_r_ctl;
 
-	if (lport->state != LPORT_ST_READY)
+	if (lport->state != LPORT_ST_READY) {
+		FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n",
+			   lport->state, r_ctl);
 		goto out;
+	}
 	if (fc_fcp_lock_pkt(fsp))
 		goto out;
 
@@ -774,8 +814,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		goto unlock;
 	}
 
-	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) {
+		FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl);
 		goto unlock;
+	}
 
 	if (r_ctl == FC_RCTL_DD_DATA_DESC) {
 		/*
@@ -910,7 +952,16 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		 * Wait a at least one jiffy to see if it is delivered.
 		 * If this expires without data, we may do SRR.
 		 */
-		fc_fcp_timer_set(fsp, 2);
+		if (fsp->lp->qfull) {
+			FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n",
+				   fsp->rport->port_id);
+			return;
+		}
+		FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun "
+			   "len %x, data len %x\n",
+			   fsp->rport->port_id,
+			   fsp->xfer_len, expected_len, fsp->data_len);
+		fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
 		return;
 	}
 	fsp->status_code = FC_DATA_OVRRUN;
@@ -959,8 +1010,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 		if (fsp->cdb_status == SAM_STAT_GOOD &&
 		    fsp->xfer_len < fsp->data_len && !fsp->io_status &&
 		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
-		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
+		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+			FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n",
+				   fsp->xfer_len, fsp->data_len);
 			fsp->status_code = FC_DATA_UNDRUN;
+		}
 	}
 
 	seq = fsp->seq_ptr;
@@ -970,7 +1024,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 		struct fc_frame *conf_frame;
 		struct fc_seq *csp;
 
-		csp = lport->tt.seq_start_next(seq);
+		csp = fc_seq_start_next(seq);
 		conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
 		if (conf_frame) {
 			f_ctl = FC_FC_SEQ_INIT;
@@ -979,10 +1033,10 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 			fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
 				       ep->did, ep->sid,
 				       FC_TYPE_FCP, f_ctl, 0);
-			lport->tt.seq_send(lport, csp, conf_frame);
+			fc_seq_send(lport, csp, conf_frame);
 		}
 	}
-	lport->tt.exch_done(seq);
+	fc_exch_done(seq);
 	}
 	/*
 	 * Some resets driven by SCSI are not I/Os and do not have
@@ -1000,10 +1054,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
  */
 static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
 {
-	struct fc_lport *lport = fsp->lp;
-
 	if (fsp->seq_ptr) {
-		lport->tt.exch_done(fsp->seq_ptr);
+		fc_exch_done(fsp->seq_ptr);
 		fsp->seq_ptr = NULL;
 	}
 	fsp->status_code = error;
@@ -1116,19 +1168,6 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 }
 
 /**
- * get_fsp_rec_tov() - Helper function to get REC_TOV
- * @fsp: the FCP packet
- *
- * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
- */
-static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
-{
-	struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
-
-	return msecs_to_jiffies(rpriv->e_d_tov) + HZ;
-}
-
-/**
  * fc_fcp_cmd_send() - Send a FCP command
  * @lport: The local port to send the command on
  * @fsp: The FCP packet the command is on
@@ -1165,8 +1204,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 		       rpriv->local_port->port_id, FC_TYPE_FCP,
 		       FC_FCTL_REQ, 0);
 
-	seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
-				      fsp, 0);
+	seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
 	if (!seq) {
 		rc = -1;
 		goto unlock;
@@ -1196,7 +1234,7 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		return;
 
 	if (error == -FC_EX_CLOSED) {
-		fc_fcp_retry_cmd(fsp);
+		fc_fcp_retry_cmd(fsp, FC_ERROR);
 		goto unlock;
 	}
 
@@ -1222,8 +1260,16 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
 	int rc = FAILED;
 	unsigned long ticks_left;
 
-	if (fc_fcp_send_abort(fsp))
+	FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state);
+	if (fc_fcp_send_abort(fsp)) {
+		FC_FCP_DBG(fsp, "failed to send abort\n");
 		return FAILED;
+	}
+
+	if (fsp->state & FC_SRB_ABORTED) {
+		FC_FCP_DBG(fsp, "target abort cmd completed\n");
+		return SUCCESS;
+	}
 
 	init_completion(&fsp->tm_done);
 	fsp->wait_for_comp = 1;
@@ -1301,7 +1347,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 
 	spin_lock_bh(&fsp->scsi_pkt_lock);
 	if (fsp->seq_ptr) {
-		lport->tt.exch_done(fsp->seq_ptr);
+		fc_exch_done(fsp->seq_ptr);
 		fsp->seq_ptr = NULL;
 	}
 	fsp->wait_for_comp = 0;
@@ -1355,7 +1401,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	if (fh->fh_type != FC_TYPE_BLS)
 		fc_fcp_resp(fsp, fp);
 	fsp->seq_ptr = NULL;
-	fsp->lp->tt.exch_done(seq);
+	fc_exch_done(seq);
 out_unlock:
 	fc_fcp_unlock_pkt(fsp);
 out:
@@ -1394,6 +1440,15 @@ static void fc_fcp_timeout(unsigned long data)
 	if (fsp->cdb_cmd.fc_tm_flags)
 		goto unlock;
 
+	if (fsp->lp->qfull) {
+		FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
+			   fsp->timer_delay);
+		setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+		fc_fcp_timer_set(fsp, fsp->timer_delay);
+		goto unlock;
+	}
+	FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n",
+		   fsp->timer_delay, rpriv->flags, fsp->state);
 	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
 
 	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
@@ -1486,8 +1541,8 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
 		switch (rjt->er_reason) {
 		default:
-			FC_FCP_DBG(fsp, "device %x unexpected REC reject "
-				   "reason %d expl %d\n",
+			FC_FCP_DBG(fsp,
+				   "device %x invalid REC reject %d/%d\n",
 				   fsp->rport->port_id, rjt->er_reason,
 				   rjt->er_explan);
 			/* fall through */
@@ -1503,18 +1558,23 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 			break;
 		case ELS_RJT_LOGIC:
 		case ELS_RJT_UNAB:
+			FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n",
+				   fsp->rport->port_id, rjt->er_reason,
+				   rjt->er_explan);
 			/*
-			 * If no data transfer, the command frame got dropped
-			 * so we just retry.  If data was transferred, we
-			 * lost the response but the target has no record,
-			 * so we abort and retry.
+			 * If the response got lost or is stuck in the
+			 * queue somewhere, we have no idea if and when
+			 * it will be received. So quarantine the xid
+			 * and retry the command.
 			 */
-			if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
-			    fsp->xfer_len == 0) {
-				fc_fcp_retry_cmd(fsp);
+			if (rjt->er_explan == ELS_EXPL_OXID_RXID) {
+				struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+				ep->state |= FC_EX_QUARANTINE;
+				fsp->state |= FC_SRB_ABORTED;
+				fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
 				break;
 			}
-			fc_fcp_recovery(fsp, FC_ERROR);
+			fc_fcp_recovery(fsp, FC_TRANS_RESET);
 			break;
 		}
 	} else if (opcode == ELS_LS_ACC) {
@@ -1608,7 +1668,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 
 	switch (error) {
 	case -FC_EX_CLOSED:
-		fc_fcp_retry_cmd(fsp);
+		FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n",
+			   fsp, fsp->rport->port_id);
+		fc_fcp_retry_cmd(fsp, FC_ERROR);
 		break;
 
 	default:
@@ -1622,8 +1684,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		 * Assume REC or LS_ACC was lost.
 		 * The exchange manager will have aborted REC, so retry.
 		 */
-		FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
-			   fsp->rport->port_id, error, fsp->recov_retry,
+		FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n",
+			   fsp, fsp->rport->port_id, fsp->recov_retry,
 			   FC_MAX_RECOV_RETRY);
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
@@ -1642,6 +1704,7 @@ out:
  */
 static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
 {
+	FC_FCP_DBG(fsp, "start recovery code %x\n", code);
 	fsp->status_code = code;
 	fsp->cdb_status = 0;
 	fsp->io_status = 0;
@@ -1668,7 +1731,6 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 	struct fc_seq *seq;
 	struct fcp_srr *srr;
 	struct fc_frame *fp;
-	unsigned int rec_tov;
 
 	rport = fsp->rport;
 	rpriv = rport->dd_data;
@@ -1692,10 +1754,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 		       rpriv->local_port->port_id, FC_TYPE_FCP,
 		       FC_FCTL_REQ, 0);
 
-	rec_tov = get_fsp_rec_tov(fsp);
-	seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
-				      fc_fcp_pkt_destroy,
-				      fsp, jiffies_to_msecs(rec_tov));
+	seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp,
+			       fc_fcp_pkt_destroy,
+			       fsp, get_fsp_rec_tov(fsp));
 	if (!seq)
 		goto retry;
 
@@ -1706,7 +1767,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 	fc_fcp_pkt_hold(fsp);		/* hold for outstanding SRR */
 	return;
 retry:
-	fc_fcp_retry_cmd(fsp);
+	fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
 }
 
 /**
@@ -1730,9 +1791,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 
 	fh = fc_frame_header_get(fp);
 	/*
-	 * BUG? fc_fcp_srr_error calls exch_done which would release
+	 * BUG? fc_fcp_srr_error calls fc_exch_done which would release
 	 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
-	 * then fc_exch_timeout would be sending an abort. The exch_done
+	 * then fc_exch_timeout would be sending an abort. The fc_exch_done
 	 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
 	 * an abort response though.
 	 */
@@ -1753,7 +1814,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	}
 	fc_fcp_unlock_pkt(fsp);
 out:
-	fsp->lp->tt.exch_done(seq);
+	fc_exch_done(seq);
 	fc_frame_free(fp);
 }
 
@@ -1768,20 +1829,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		goto out;
 	switch (PTR_ERR(fp)) {
 	case -FC_EX_TIMEOUT:
+		FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry);
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
 		else
 			fc_fcp_recovery(fsp, FC_TIMED_OUT);
 		break;
 	case -FC_EX_CLOSED:	/* e.g., link failure */
+		FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
 		/* fall through */
 	default:
-		fc_fcp_retry_cmd(fsp);
+		fc_fcp_retry_cmd(fsp, FC_ERROR);
 		break;
 	}
 	fc_fcp_unlock_pkt(fsp);
 out:
-	fsp->lp->tt.exch_done(fsp->recov_seq);
+	fc_exch_done(fsp->recov_seq);
 }
 
 /**
@@ -1832,8 +1895,13 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
 	rpriv = rport->dd_data;
 
 	if (!fc_fcp_lport_queue_ready(lport)) {
-		if (lport->qfull)
+		if (lport->qfull) {
 			fc_fcp_can_queue_ramp_down(lport);
+			shost_printk(KERN_ERR, lport->host,
+				     "libfc: queue full, "
+				     "reducing can_queue to %d.\n",
+				     lport->host->can_queue);
+		}
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
@@ -1980,15 +2048,26 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 		sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
 		break;
 	case FC_CMD_ABORTED:
-		FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
-			   "due to FC_CMD_ABORTED\n");
-		sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
+		if (host_byte(sc_cmd->result) == DID_TIME_OUT)
+			FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml "
+				   "due to FC_CMD_ABORTED\n");
+		else {
+			FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+				   "due to FC_CMD_ABORTED\n");
+			set_host_byte(sc_cmd, DID_ERROR);
+		}
+		sc_cmd->result |= fsp->io_status;
 		break;
 	case FC_CMD_RESET:
 		FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
 			   "due to FC_CMD_RESET\n");
 		sc_cmd->result = (DID_RESET << 16);
 		break;
+	case FC_TRANS_RESET:
+		FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml "
+			   "due to FC_TRANS_RESET\n");
+		sc_cmd->result = (DID_SOFT_ERROR << 16);
+		break;
 	case FC_HRD_ERROR:
 		FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
 			   "due to FC_HRD_ERROR\n");
@@ -2142,7 +2221,7 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
 
 	fc_block_scsi_eh(sc_cmd);
 
-	lport->tt.lport_reset(lport);
+	fc_lport_reset(lport);
 	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
 	while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
 							       wait_tmo))
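The new FC_TRANS_RESET status ties the fc_fcp.c hunks together: a command whose xid was quarantined and retried completes with FC_TRANS_RESET, which fc_io_compl() maps to DID_SOFT_ERROR so the SCSI midlayer retries it, in contrast to FC_ERROR's DID_ERROR. A condensed sketch of that mapping, simplified from the fc_io_compl() hunk above:

/* Sketch: how the new status code surfaces to the SCSI midlayer. */
static void example_map_status(struct scsi_cmnd *sc_cmd, u8 status_code)
{
	switch (status_code) {
	case FC_TRANS_RESET:	/* xid quarantined, command retried */
		sc_cmd->result = (DID_SOFT_ERROR << 16);
		break;
	case FC_CMD_RESET:
		sc_cmd->result = (DID_RESET << 16);
		break;
	}
}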
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index c11a638f32e6..d623d084b7ec 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -226,7 +226,7 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
 
 	sp = fr_seq(in_fp);
 	if (sp)
-		fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp);
+		fr_seq(fp) = fc_seq_start_next(sp);
 	fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
 }
 EXPORT_SYMBOL(fc_fill_reply_hdr);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 50c71678a156..919736a74ffa 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -149,7 +149,7 @@ static const char *fc_lport_state_names[] = {
  * @offset:   The offset into the response data
  */
 struct fc_bsg_info {
-	struct fc_bsg_job *job;
+	struct bsg_job *job;
 	struct fc_lport *lport;
 	u16 rsp_code;
 	struct scatterlist *sg;
@@ -200,7 +200,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
 				     "in the DNS or FDMI state, it's in the "
 				     "%d state", rdata->ids.port_id,
 				     lport->state);
-			lport->tt.rport_logoff(rdata);
+			fc_rport_logoff(rdata);
 		}
 		break;
 	case RPORT_EV_LOGO:
@@ -237,23 +237,26 @@ static const char *fc_lport_state(struct fc_lport *lport)
  * @remote_fid:	 The FID of the ptp rport
  * @remote_wwpn: The WWPN of the ptp rport
  * @remote_wwnn: The WWNN of the ptp rport
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
  */
 static void fc_lport_ptp_setup(struct fc_lport *lport,
 			       u32 remote_fid, u64 remote_wwpn,
 			       u64 remote_wwnn)
 {
-	mutex_lock(&lport->disc.disc_mutex);
 	if (lport->ptp_rdata) {
-		lport->tt.rport_logoff(lport->ptp_rdata);
-		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+		fc_rport_logoff(lport->ptp_rdata);
+		kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
 	}
-	lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+	mutex_lock(&lport->disc.disc_mutex);
+	lport->ptp_rdata = fc_rport_create(lport, remote_fid);
 	kref_get(&lport->ptp_rdata->kref);
 	lport->ptp_rdata->ids.port_name = remote_wwpn;
 	lport->ptp_rdata->ids.node_name = remote_wwnn;
 	mutex_unlock(&lport->disc.disc_mutex);
 
-	lport->tt.rport_login(lport->ptp_rdata);
+	fc_rport_login(lport->ptp_rdata);
 
 	fc_lport_enter_ready(lport);
 }
@@ -409,7 +412,7 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
 	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
 		     fc_lport_state(lport));
 
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+	fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
 	fc_frame_free(fp);
 }
 
@@ -478,7 +481,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
 	if (!req) {
 		rjt_data.reason = ELS_RJT_LOGIC;
 		rjt_data.explan = ELS_EXPL_NONE;
-		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+		fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
 	} else {
 		fmt = req->rnid_fmt;
 		len = sizeof(*rp);
@@ -518,7 +521,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
  */
 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 {
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+	fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
 	fc_lport_enter_reset(lport);
 	fc_frame_free(fp);
 }
@@ -620,9 +623,9 @@ int fc_fabric_logoff(struct fc_lport *lport)
 	lport->tt.disc_stop_final(lport);
 	mutex_lock(&lport->lp_mutex);
 	if (lport->dns_rdata)
-		lport->tt.rport_logoff(lport->dns_rdata);
+		fc_rport_logoff(lport->dns_rdata);
 	mutex_unlock(&lport->lp_mutex);
-	lport->tt.rport_flush_queue();
+	fc_rport_flush_queue();
 	mutex_lock(&lport->lp_mutex);
 	fc_lport_enter_logo(lport);
 	mutex_unlock(&lport->lp_mutex);
@@ -899,7 +902,7 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
 	/*
 	 * Check opcode.
 	 */
-	recv = lport->tt.rport_recv_req;
+	recv = fc_rport_recv_req;
 	switch (fc_frame_payload_op(fp)) {
 	case ELS_FLOGI:
 		if (!lport->point_to_multipoint)
@@ -941,15 +944,14 @@ struct fc4_prov fc_lport_els_prov = {
 };
 
 /**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv() - The generic lport request handler
  * @lport: The lport that received the request
  * @fp: The frame the request is in
  *
  * Locking Note: This function should not be called with the lport
  * lock held because it may grab the lock.
  */
-static void fc_lport_recv_req(struct fc_lport *lport,
-                              struct fc_frame *fp)
+void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
 {
         struct fc_frame_header *fh = fc_frame_header_get(fp);
         struct fc_seq *sp = fr_seq(fp);
@@ -978,8 +980,9 @@ drop:
         FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
         fc_frame_free(fp);
         if (sp)
-                lport->tt.exch_done(sp);
+                fc_exch_done(sp);
 }
+EXPORT_SYMBOL(fc_lport_recv);
 
 /**
  * fc_lport_reset() - Reset a local port
@@ -1007,12 +1010,14 @@ EXPORT_SYMBOL(fc_lport_reset);
  */
 static void fc_lport_reset_locked(struct fc_lport *lport)
 {
-        if (lport->dns_rdata)
-                lport->tt.rport_logoff(lport->dns_rdata);
+        if (lport->dns_rdata) {
+                fc_rport_logoff(lport->dns_rdata);
+                lport->dns_rdata = NULL;
+        }
 
         if (lport->ptp_rdata) {
-                lport->tt.rport_logoff(lport->ptp_rdata);
-                kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+                fc_rport_logoff(lport->ptp_rdata);
+                kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
                 lport->ptp_rdata = NULL;
         }
 
@@ -1426,13 +1431,13 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
         fc_lport_state_enter(lport, LPORT_ST_DNS);
 
         mutex_lock(&lport->disc.disc_mutex);
-        rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
+        rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
         mutex_unlock(&lport->disc.disc_mutex);
         if (!rdata)
                 goto err;
 
         rdata->ops = &fc_lport_rport_ops;
-        lport->tt.rport_login(rdata);
+        fc_rport_login(rdata);
         return;
 
 err:
@@ -1543,13 +1548,13 @@ static void fc_lport_enter_fdmi(struct fc_lport *lport)
         fc_lport_state_enter(lport, LPORT_ST_FDMI);
 
         mutex_lock(&lport->disc.disc_mutex);
-        rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
+        rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
         mutex_unlock(&lport->disc.disc_mutex);
         if (!rdata)
                 goto err;
 
         rdata->ops = &fc_lport_rport_ops;
-        lport->tt.rport_login(rdata);
+        fc_rport_login(rdata);
         return;
 
 err:
@@ -1772,7 +1777,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
                 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
                         if (e_d_tov > lport->e_d_tov)
                                 lport->e_d_tov = e_d_tov;
-                        lport->r_a_tov = 2 * e_d_tov;
+                        lport->r_a_tov = 2 * lport->e_d_tov;
                         fc_lport_set_port_id(lport, did, fp);
                         printk(KERN_INFO "host%d: libfc: "
                                 "Port (%6.6x) entered "
@@ -1784,8 +1789,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
                                         get_unaligned_be64(
                                                 &flp->fl_wwnn));
                 } else {
-                        lport->e_d_tov = e_d_tov;
-                        lport->r_a_tov = r_a_tov;
+                        if (e_d_tov > lport->e_d_tov)
+                                lport->e_d_tov = e_d_tov;
+                        if (r_a_tov > lport->r_a_tov)
+                                lport->r_a_tov = r_a_tov;
                         fc_host_fabric_name(lport->host) =
                                 get_unaligned_be64(&flp->fl_wwnn);
                         fc_lport_set_port_id(lport, did, fp);
@@ -1858,12 +1865,6 @@ EXPORT_SYMBOL(fc_lport_config);
  */
 int fc_lport_init(struct fc_lport *lport)
 {
-        if (!lport->tt.lport_recv)
-                lport->tt.lport_recv = fc_lport_recv_req;
-
-        if (!lport->tt.lport_reset)
-                lport->tt.lport_reset = fc_lport_reset;
-
         fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
         fc_host_node_name(lport->host) = lport->wwnn;
         fc_host_port_name(lport->host) = lport->wwpn;
@@ -1900,18 +1901,19 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
                               void *info_arg)
 {
         struct fc_bsg_info *info = info_arg;
-        struct fc_bsg_job *job = info->job;
+        struct bsg_job *job = info->job;
+        struct fc_bsg_reply *bsg_reply = job->reply;
         struct fc_lport *lport = info->lport;
         struct fc_frame_header *fh;
         size_t len;
         void *buf;
 
         if (IS_ERR(fp)) {
-                job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
+                bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
                         -ECONNABORTED : -ETIMEDOUT;
                 job->reply_len = sizeof(uint32_t);
-                job->state_flags |= FC_RQST_STATE_DONE;
-                job->job_done(job);
+                bsg_job_done(job, bsg_reply->result,
+                             bsg_reply->reply_payload_rcv_len);
                 kfree(info);
                 return;
         }
@@ -1928,25 +1930,25 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
                         (unsigned short)fc_frame_payload_op(fp);
 
                 /* Save the reply status of the job */
-                job->reply->reply_data.ctels_reply.status =
+                bsg_reply->reply_data.ctels_reply.status =
                         (cmd == info->rsp_code) ?
                         FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
         }
 
-        job->reply->reply_payload_rcv_len +=
+        bsg_reply->reply_payload_rcv_len +=
                 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
                                          &info->offset, NULL);
 
         if (fr_eof(fp) == FC_EOF_T &&
             (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
             (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
-                if (job->reply->reply_payload_rcv_len >
+                if (bsg_reply->reply_payload_rcv_len >
                     job->reply_payload.payload_len)
-                        job->reply->reply_payload_rcv_len =
+                        bsg_reply->reply_payload_rcv_len =
                                 job->reply_payload.payload_len;
-                job->reply->result = 0;
-                job->state_flags |= FC_RQST_STATE_DONE;
-                job->job_done(job);
+                bsg_reply->result = 0;
+                bsg_job_done(job, bsg_reply->result,
+                             bsg_reply->reply_payload_rcv_len);
                 kfree(info);
         }
         fc_frame_free(fp);
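Both completion legs of this handler now funnel through one call: under the generic bsg API the driver no longer sets FC_RQST_STATE_DONE and invokes job->job_done(); bsg_job_done() takes the result and the received-payload length directly. A minimal sketch of the resulting idiom, using only fields that appear in the hunks above (complete_fc_bsg is a hypothetical helper, not part of the patch):

    #include <linux/bsg-lib.h>
    #include <scsi/scsi_bsg_fc.h>

    static void complete_fc_bsg(struct bsg_job *job, int result)
    {
            struct fc_bsg_reply *bsg_reply = job->reply;

            bsg_reply->result = result;
            /* one call replaces state_flags |= FC_RQST_STATE_DONE + job_done() */
            bsg_job_done(job, bsg_reply->result,
                         bsg_reply->reply_payload_rcv_len);
    }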
@@ -1962,7 +1964,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
  */
-static int fc_lport_els_request(struct fc_bsg_job *job,
+static int fc_lport_els_request(struct bsg_job *job,
                                 struct fc_lport *lport,
                                 u32 did, u32 tov)
 {
@@ -2005,8 +2007,8 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
         info->nents = job->reply_payload.sg_cnt;
         info->sg = job->reply_payload.sg_list;
 
-        if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
-                                     NULL, info, tov)) {
+        if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+                              NULL, info, tov)) {
                 kfree(info);
                 return -ECOMM;
         }
@@ -2023,7 +2025,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
  */
-static int fc_lport_ct_request(struct fc_bsg_job *job,
+static int fc_lport_ct_request(struct bsg_job *job,
                                struct fc_lport *lport, u32 did, u32 tov)
 {
         struct fc_bsg_info *info;
@@ -2066,8 +2068,8 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
         info->nents = job->reply_payload.sg_cnt;
         info->sg = job->reply_payload.sg_list;
 
-        if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
-                                     NULL, info, tov)) {
+        if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+                              NULL, info, tov)) {
                 kfree(info);
                 return -ECOMM;
         }
@@ -2079,25 +2081,27 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
  * FC Passthrough requests
  * @job: The BSG passthrough job
  */
-int fc_lport_bsg_request(struct fc_bsg_job *job)
+int fc_lport_bsg_request(struct bsg_job *job)
 {
+        struct fc_bsg_request *bsg_request = job->request;
+        struct fc_bsg_reply *bsg_reply = job->reply;
         struct request *rsp = job->req->next_rq;
-        struct Scsi_Host *shost = job->shost;
+        struct Scsi_Host *shost = fc_bsg_to_shost(job);
         struct fc_lport *lport = shost_priv(shost);
         struct fc_rport *rport;
         struct fc_rport_priv *rdata;
         int rc = -EINVAL;
         u32 did, tov;
 
-        job->reply->reply_payload_rcv_len = 0;
+        bsg_reply->reply_payload_rcv_len = 0;
         if (rsp)
                 rsp->resid_len = job->reply_payload.payload_len;
 
         mutex_lock(&lport->lp_mutex);
 
-        switch (job->request->msgcode) {
+        switch (bsg_request->msgcode) {
         case FC_BSG_RPT_ELS:
-                rport = job->rport;
+                rport = fc_bsg_to_rport(job);
                 if (!rport)
                         break;
 
@@ -2107,7 +2111,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
                 break;
 
         case FC_BSG_RPT_CT:
-                rport = job->rport;
+                rport = fc_bsg_to_rport(job);
                 if (!rport)
                         break;
 
@@ -2117,25 +2121,25 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
                 break;
 
         case FC_BSG_HST_CT:
-                did = ntoh24(job->request->rqst_data.h_ct.port_id);
+                did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
                 if (did == FC_FID_DIR_SERV) {
                         rdata = lport->dns_rdata;
                         if (!rdata)
                                 break;
                         tov = rdata->e_d_tov;
                 } else {
-                        rdata = lport->tt.rport_lookup(lport, did);
+                        rdata = fc_rport_lookup(lport, did);
                         if (!rdata)
                                 break;
                         tov = rdata->e_d_tov;
-                        kref_put(&rdata->kref, lport->tt.rport_destroy);
+                        kref_put(&rdata->kref, fc_rport_destroy);
                 }
 
                 rc = fc_lport_ct_request(job, lport, did, tov);
                 break;
 
         case FC_BSG_HST_ELS_NOLOGIN:
-                did = ntoh24(job->request->rqst_data.h_els.port_id);
+                did = ntoh24(bsg_request->rqst_data.h_els.port_id);
                 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
                 break;
         }
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 97aeaddd600d..c991f3b822f8 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -44,6 +44,19 @@
  * path this potential over-use of the mutex is acceptable.
  */
 
+/*
+ * RPORT REFERENCE COUNTING
+ *
+ * A rport reference should be taken when:
+ * - an rport is allocated
+ * - a workqueue item is scheduled
+ * - an ELS request is sent
+ * The reference should be dropped when:
+ * - the workqueue function has finished
+ * - the ELS response is handled
+ * - an rport is removed
+ */
+
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
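The rules in the new comment block generalize beyond libfc. A self-contained sketch against the stock kref and workqueue APIs — struct foo, foo_release() and foo_schedule() are hypothetical, not libfc symbols — showing "take a reference when a workqueue item is scheduled" and "drop it when the workqueue function has finished", including the undo when the item was already queued:

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct foo {
            struct kref kref;
            struct work_struct work;
    };

    static void foo_release(struct kref *kref)
    {
            kfree(container_of(kref, struct foo, kref));
    }

    static void foo_work(struct work_struct *work)
    {
            struct foo *foo = container_of(work, struct foo, work);

            /* ... deferred processing ... */
            kref_put(&foo->kref, foo_release);      /* drop when the work finishes */
    }

    static void foo_schedule(struct foo *foo)
    {
            kref_get(&foo->kref);                   /* take when scheduling */
            if (!schedule_work(&foo->work))
                    kref_put(&foo->kref, foo_release);  /* already queued: undo */
    }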
@@ -74,8 +87,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
 static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
 static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
 static void fc_rport_timeout(struct work_struct *);
-static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
-static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_error(struct fc_rport_priv *, int);
+static void fc_rport_error_retry(struct fc_rport_priv *, int);
 static void fc_rport_work(struct work_struct *);
 
 static const char *fc_rport_state_names[] = {
@@ -98,8 +111,8 @@ static const char *fc_rport_state_names[] = {
  * The reference count of the fc_rport_priv structure is
  * increased by one.
  */
-static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
-                                             u32 port_id)
+struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+                                      u32 port_id)
 {
         struct fc_rport_priv *rdata = NULL, *tmp_rdata;
 
@@ -113,6 +126,7 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
         rcu_read_unlock();
         return rdata;
 }
+EXPORT_SYMBOL(fc_rport_lookup);
 
 /**
  * fc_rport_create() - Create a new remote port
@@ -123,12 +137,11 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
  *
  * Locking note: must be called with the disc_mutex held.
  */
-static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
-                                             u32 port_id)
+struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
 {
         struct fc_rport_priv *rdata;
 
-        rdata = lport->tt.rport_lookup(lport, port_id);
+        rdata = fc_rport_lookup(lport, port_id);
         if (rdata)
                 return rdata;
 
@@ -158,18 +171,20 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
         }
         return rdata;
 }
+EXPORT_SYMBOL(fc_rport_create);
 
 /**
  * fc_rport_destroy() - Free a remote port after last reference is released
  * @kref: The remote port's kref
  */
-static void fc_rport_destroy(struct kref *kref)
+void fc_rport_destroy(struct kref *kref)
 {
         struct fc_rport_priv *rdata;
 
         rdata = container_of(kref, struct fc_rport_priv, kref);
         kfree_rcu(rdata, rcu);
 }
+EXPORT_SYMBOL(fc_rport_destroy);
 
 /**
  * fc_rport_state() - Return a string identifying the remote port's state
@@ -242,6 +257,8 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata,
 /**
  * fc_rport_work() - Handler for remote port events in the rport_event_queue
  * @work: Handle to the remote port being dequeued
+ *
+ * Reference counting: drops kref on return
  */
 static void fc_rport_work(struct work_struct *work)
 {
@@ -272,12 +289,14 @@ static void fc_rport_work(struct work_struct *work)
                 kref_get(&rdata->kref);
                 mutex_unlock(&rdata->rp_mutex);
 
-                if (!rport)
+                if (!rport) {
+                        FC_RPORT_DBG(rdata, "No rport!\n");
                         rport = fc_remote_port_add(lport->host, 0, &ids);
+                }
                 if (!rport) {
                         FC_RPORT_DBG(rdata, "Failed to add the rport\n");
-                        lport->tt.rport_logoff(rdata);
-                        kref_put(&rdata->kref, lport->tt.rport_destroy);
+                        fc_rport_logoff(rdata);
+                        kref_put(&rdata->kref, fc_rport_destroy);
                         return;
                 }
                 mutex_lock(&rdata->rp_mutex);
@@ -303,7 +322,7 @@ static void fc_rport_work(struct work_struct *work)
                         FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
                         rdata->lld_event_callback(lport, rdata, event);
                 }
-                kref_put(&rdata->kref, lport->tt.rport_destroy);
+                kref_put(&rdata->kref, fc_rport_destroy);
                 break;
 
         case RPORT_EV_FAILED:
@@ -329,7 +348,8 @@ static void fc_rport_work(struct work_struct *work)
                         FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
                         rdata->lld_event_callback(lport, rdata, event);
                 }
-                cancel_delayed_work_sync(&rdata->retry_work);
+                if (cancel_delayed_work_sync(&rdata->retry_work))
+                        kref_put(&rdata->kref, fc_rport_destroy);
 
                 /*
                  * Reset any outstanding exchanges before freeing rport.
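This is the counterpart of the scheduling rule above: a pending retry_work holds a reference, and cancel_delayed_work_sync() returns true exactly when it removed a still-pending item. In that case the work function will never run to drop the reference, so the canceller drops it. In the terms of the earlier sketch (assuming struct foo gained a struct delayed_work retry_work):

    if (cancel_delayed_work_sync(&foo->retry_work))
            kref_put(&foo->kref, foo_release);  /* work won't run; drop its reference */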
@@ -351,7 +371,7 @@ static void fc_rport_work(struct work_struct *work)
                         if (port_id == FC_FID_DIR_SERV) {
                                 rdata->event = RPORT_EV_NONE;
                                 mutex_unlock(&rdata->rp_mutex);
-                                kref_put(&rdata->kref, lport->tt.rport_destroy);
+                                kref_put(&rdata->kref, fc_rport_destroy);
                         } else if ((rdata->flags & FC_RP_STARTED) &&
                                    rdata->major_retries <
                                    lport->max_rport_retry_count) {
@@ -362,17 +382,21 @@ static void fc_rport_work(struct work_struct *work)
                                 mutex_unlock(&rdata->rp_mutex);
                         } else {
                                 FC_RPORT_DBG(rdata, "work delete\n");
+                                mutex_lock(&lport->disc.disc_mutex);
                                 list_del_rcu(&rdata->peers);
+                                mutex_unlock(&lport->disc.disc_mutex);
                                 mutex_unlock(&rdata->rp_mutex);
-                                kref_put(&rdata->kref, lport->tt.rport_destroy);
+                                kref_put(&rdata->kref, fc_rport_destroy);
                         }
                 } else {
                         /*
                          * Re-open for events.  Reissue READY event if ready.
                          */
                         rdata->event = RPORT_EV_NONE;
-                        if (rdata->rp_state == RPORT_ST_READY)
+                        if (rdata->rp_state == RPORT_ST_READY) {
+                                FC_RPORT_DBG(rdata, "work reopen\n");
                                 fc_rport_enter_ready(rdata);
+                        }
                         mutex_unlock(&rdata->rp_mutex);
                 }
                 break;
@@ -381,12 +405,21 @@ static void fc_rport_work(struct work_struct *work)
                 mutex_unlock(&rdata->rp_mutex);
                 break;
         }
+        kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
  * fc_rport_login() - Start the remote port login state machine
  * @rdata: The remote port to be logged in to
  *
+ * Initiates the RP state machine. It is called from the LP module.
+ * This function will issue the following commands to the N_Port
+ * identified by the FC ID provided.
+ *
+ * - PLOGI
+ * - PRLI
+ * - RTV
+ *
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
@@ -395,10 +428,16 @@ static void fc_rport_work(struct work_struct *work)
  * If it appears we are already logged in, ADISC is used to verify
  * the setup.
  */
-static int fc_rport_login(struct fc_rport_priv *rdata)
+int fc_rport_login(struct fc_rport_priv *rdata)
 {
         mutex_lock(&rdata->rp_mutex);
 
+        if (rdata->flags & FC_RP_STARTED) {
+                FC_RPORT_DBG(rdata, "port already started\n");
+                mutex_unlock(&rdata->rp_mutex);
+                return 0;
+        }
+
         rdata->flags |= FC_RP_STARTED;
         switch (rdata->rp_state) {
         case RPORT_ST_READY:
@@ -408,15 +447,20 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
         case RPORT_ST_DELETE:
                 FC_RPORT_DBG(rdata, "Restart deleted port\n");
                 break;
-        default:
+        case RPORT_ST_INIT:
                 FC_RPORT_DBG(rdata, "Login to port\n");
                 fc_rport_enter_flogi(rdata);
                 break;
+        default:
+                FC_RPORT_DBG(rdata, "Login in progress, state %s\n",
+                             fc_rport_state(rdata));
+                break;
         }
         mutex_unlock(&rdata->rp_mutex);
 
         return 0;
 }
+EXPORT_SYMBOL(fc_rport_login);
 
 /**
  * fc_rport_enter_delete() - Schedule a remote port to be deleted
@@ -431,6 +475,8 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
  * Set the new event so that the old pending event will not occur.
  * Since we have the mutex, even if fc_rport_work() is already started,
  * it'll see the new event.
+ *
+ * Reference counting: does not modify kref
  */
 static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
                                   enum fc_rport_event event)
@@ -442,8 +488,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
 
         fc_rport_state_enter(rdata, RPORT_ST_DELETE);
 
-        if (rdata->event == RPORT_EV_NONE)
-                queue_work(rport_event_queue, &rdata->event_work);
+        kref_get(&rdata->kref);
+        if (rdata->event == RPORT_EV_NONE &&
+            !queue_work(rport_event_queue, &rdata->event_work))
+                kref_put(&rdata->kref, fc_rport_destroy);
+
         rdata->event = event;
 }
 
@@ -455,7 +504,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
  */
-static int fc_rport_logoff(struct fc_rport_priv *rdata)
+int fc_rport_logoff(struct fc_rport_priv *rdata)
 {
         struct fc_lport *lport = rdata->local_port;
         u32 port_id = rdata->ids.port_id;
@@ -489,6 +538,7 @@ out:
         mutex_unlock(&rdata->rp_mutex);
         return 0;
 }
+EXPORT_SYMBOL(fc_rport_logoff);
 
 /**
  * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
@@ -496,6 +546,8 @@ out:
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: schedules workqueue, does not modify kref
  */
 static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
 {
@@ -503,8 +555,11 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
 
         FC_RPORT_DBG(rdata, "Port is Ready\n");
 
-        if (rdata->event == RPORT_EV_NONE)
-                queue_work(rport_event_queue, &rdata->event_work);
+        kref_get(&rdata->kref);
+        if (rdata->event == RPORT_EV_NONE &&
+            !queue_work(rport_event_queue, &rdata->event_work))
+                kref_put(&rdata->kref, fc_rport_destroy);
+
         rdata->event = RPORT_EV_READY;
 }
 
@@ -515,6 +570,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
+ *
+ * Reference counting: Drops kref on return.
  */
 static void fc_rport_timeout(struct work_struct *work)
 {
@@ -522,6 +579,7 @@ static void fc_rport_timeout(struct work_struct *work)
                 container_of(work, struct fc_rport_priv, retry_work.work);
 
         mutex_lock(&rdata->rp_mutex);
+        FC_RPORT_DBG(rdata, "Port timeout, state %s\n", fc_rport_state(rdata));
 
         switch (rdata->rp_state) {
         case RPORT_ST_FLOGI:
@@ -547,23 +605,25 @@ static void fc_rport_timeout(struct work_struct *work)
         }
 
         mutex_unlock(&rdata->rp_mutex);
+        kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
  * fc_rport_error() - Error handler, called once retries have been exhausted
  * @rdata: The remote port the error happened on
- * @fp: The error code encapsulated in a frame pointer
+ * @err: The error code
  *
  * Locking Note: The rport lock is expected to be held before
  * calling this routine
+ *
+ * Reference counting: does not modify kref
  */
-static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
+static void fc_rport_error(struct fc_rport_priv *rdata, int err)
 {
         struct fc_lport *lport = rdata->local_port;
 
-        FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
-                     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
-                     fc_rport_state(rdata), rdata->retries);
+        FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
+                     -err, fc_rport_state(rdata), rdata->retries);
 
         switch (rdata->rp_state) {
         case RPORT_ST_FLOGI:
@@ -595,36 +655,39 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
 /**
  * fc_rport_error_retry() - Handler for remote port state retries
  * @rdata: The remote port whose state is to be retried
- * @fp: The error code encapsulated in a frame pointer
+ * @err: The error code
  *
  * If the error was an exchange timeout retry immediately,
  * otherwise wait for E_D_TOV.
  *
  * Locking Note: The rport lock is expected to be held before
  * calling this routine
+ *
+ * Reference counting: increments kref when scheduling retry_work
  */
-static void fc_rport_error_retry(struct fc_rport_priv *rdata,
-                                 struct fc_frame *fp)
+static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
 {
-        unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
+        unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
 
         /* make sure this isn't an FC_EX_CLOSED error, never retry those */
-        if (PTR_ERR(fp) == -FC_EX_CLOSED)
+        if (err == -FC_EX_CLOSED)
                 goto out;
 
         if (rdata->retries < rdata->local_port->max_rport_retry_count) {
-                FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
-                             PTR_ERR(fp), fc_rport_state(rdata));
+                FC_RPORT_DBG(rdata, "Error %d in state %s, retrying\n",
+                             err, fc_rport_state(rdata));
                 rdata->retries++;
                 /* no additional delay on exchange timeouts */
-                if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
+                if (err == -FC_EX_TIMEOUT)
                         delay = 0;
-                schedule_delayed_work(&rdata->retry_work, delay);
+                kref_get(&rdata->kref);
+                if (!schedule_delayed_work(&rdata->retry_work, delay))
+                        kref_put(&rdata->kref, fc_rport_destroy);
                 return;
         }
 
 out:
-        fc_rport_error(rdata, fp);
+        fc_rport_error(rdata, err);
 }
 
 /**
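With this conversion the error plumbing becomes uniform: the exchange layer still delivers errors as ERR_PTR()-encoded frame pointers, but each response handler decodes that exactly once and the shared error paths take a plain negative code. A condensed sketch of the shape the handlers now share — my_resp() and my_error() are placeholders for an fc_rport_*_resp()/fc_rport_error() pair, not symbols from the patch:

    #include <linux/err.h>
    #include <scsi/libfc.h>

    static void my_error(struct fc_rport_priv *rdata, int err);

    static void my_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
    {
            struct fc_rport_priv *rdata = arg;

            if (IS_ERR(fp)) {
                    /* e.g. -FC_EX_CLOSED or -FC_EX_TIMEOUT from the exch layer */
                    my_error(rdata, PTR_ERR(fp));
                    return;
            }
            /* ... normal frame handling, then fc_frame_free(fp) ... */
    }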
@@ -684,8 +747,11 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
         struct fc_lport *lport = rdata->local_port;
         struct fc_els_flogi *flogi;
         unsigned int r_a_tov;
+        u8 opcode;
+        int err = 0;
 
-        FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
+        FC_RPORT_DBG(rdata, "Received a FLOGI %s\n",
+                     IS_ERR(fp) ? "error" : fc_els_resp_type(fp));
 
         if (fp == ERR_PTR(-FC_EX_CLOSED))
                 goto put;
@@ -701,18 +767,34 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
         }
 
         if (IS_ERR(fp)) {
-                fc_rport_error(rdata, fp);
+                fc_rport_error(rdata, PTR_ERR(fp));
                 goto err;
         }
-
-        if (fc_frame_payload_op(fp) != ELS_LS_ACC)
+        opcode = fc_frame_payload_op(fp);
+        if (opcode == ELS_LS_RJT) {
+                struct fc_els_ls_rjt *rjt;
+
+                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+                FC_RPORT_DBG(rdata, "FLOGI ELS rejected, reason %x expl %x\n",
+                             rjt->er_reason, rjt->er_explan);
+                err = -FC_EX_ELS_RJT;
                 goto bad;
-        if (fc_rport_login_complete(rdata, fp))
+        } else if (opcode != ELS_LS_ACC) {
+                FC_RPORT_DBG(rdata, "FLOGI ELS invalid opcode %x\n", opcode);
+                err = -FC_EX_ELS_RJT;
                 goto bad;
+        }
+        if (fc_rport_login_complete(rdata, fp)) {
+                FC_RPORT_DBG(rdata, "FLOGI failed, no login\n");
+                err = -FC_EX_INV_LOGIN;
+                goto bad;
+        }
 
         flogi = fc_frame_payload_get(fp, sizeof(*flogi));
-        if (!flogi)
+        if (!flogi) {
+                err = -FC_EX_ALLOC_ERR;
                 goto bad;
+        }
         r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
         if (r_a_tov > rdata->r_a_tov)
                 rdata->r_a_tov = r_a_tov;
@@ -726,11 +808,11 @@ out:
 err:
         mutex_unlock(&rdata->rp_mutex);
 put:
-        kref_put(&rdata->kref, lport->tt.rport_destroy);
+        kref_put(&rdata->kref, fc_rport_destroy);
         return;
 bad:
         FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
-        fc_rport_error_retry(rdata, fp);
+        fc_rport_error_retry(rdata, err);
         goto out;
 }
 
@@ -740,6 +822,8 @@ bad:
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
 {
@@ -756,20 +840,23 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
 
         fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
         if (!fp)
-                return fc_rport_error_retry(rdata, fp);
+                return fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
 
+        kref_get(&rdata->kref);
         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
                                   fc_rport_flogi_resp, rdata,
-                                  2 * lport->r_a_tov))
-                fc_rport_error_retry(rdata, NULL);
-        else
-                kref_get(&rdata->kref);
+                                  2 * lport->r_a_tov)) {
+                fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+                kref_put(&rdata->kref, fc_rport_destroy);
+        }
 }
 
 /**
  * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
  * @lport: The local port that received the PLOGI request
  * @rx_fp: The PLOGI request frame
+ *
+ * Reference counting: drops kref on return
  */
 static void fc_rport_recv_flogi_req(struct fc_lport *lport,
                                     struct fc_frame *rx_fp)
@@ -799,7 +886,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
                 goto reject;
         }
 
-        rdata = lport->tt.rport_lookup(lport, sid);
+        rdata = fc_rport_lookup(lport, sid);
         if (!rdata) {
                 rjt_data.reason = ELS_RJT_FIP;
                 rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
@@ -824,8 +911,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
                  * RPORT wouldn't have been created and 'rport_lookup'
                  * would have failed anyway in that case.
                  */
-                if (lport->point_to_multipoint)
-                        break;
+                break;
         case RPORT_ST_DELETE:
                 mutex_unlock(&rdata->rp_mutex);
                 rjt_data.reason = ELS_RJT_FIP;
@@ -867,20 +953,27 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
         fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
         lport->tt.frame_send(lport, fp);
 
-        if (rdata->ids.port_name < lport->wwpn)
-                fc_rport_enter_plogi(rdata);
-        else
-                fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+        /*
+         * Do not proceed with the state machine if our
+         * FLOGI has crossed with an FLOGI from the
+         * remote port; wait for the FLOGI response instead.
+         */
+        if (rdata->rp_state != RPORT_ST_FLOGI) {
+                if (rdata->ids.port_name < lport->wwpn)
+                        fc_rport_enter_plogi(rdata);
+                else
+                        fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+        }
 out:
         mutex_unlock(&rdata->rp_mutex);
-        kref_put(&rdata->kref, lport->tt.rport_destroy);
+        kref_put(&rdata->kref, fc_rport_destroy);
         fc_frame_free(rx_fp);
         return;
 
 reject_put:
-        kref_put(&rdata->kref, lport->tt.rport_destroy);
+        kref_put(&rdata->kref, fc_rport_destroy);
 reject:
-        lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+        fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
         fc_frame_free(rx_fp);
 }
 
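The new guard encodes the point-to-point FLOGI-crossing rule: after ACCing the peer's FLOGI, the handler only advances the state machine if our own FLOGI is not still outstanding, and the tie is broken by WWPN — the port that sees a numerically lower remote WWPN (i.e. the one with the higher WWPN) originates PLOGI, while the other side parks in PLOGI_WAIT. Reduced to a predicate, with a hypothetical helper name that is not part of the patch:

    #include <linux/types.h>

    /* true when the local port should originate PLOGI in p2p mode */
    static inline bool fc_p2p_we_send_plogi(u64 remote_wwpn, u64 local_wwpn)
    {
            /* mirrors: if (rdata->ids.port_name < lport->wwpn) enter_plogi() */
            return remote_wwpn < local_wwpn;
    }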
@@ -904,10 +997,13 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
         u16 cssp_seq;
         u8 op;
 
-        mutex_lock(&rdata->rp_mutex);
-
         FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
 
+        if (fp == ERR_PTR(-FC_EX_CLOSED))
+                goto put;
+
+        mutex_lock(&rdata->rp_mutex);
+
         if (rdata->rp_state != RPORT_ST_PLOGI) {
                 FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
                              "%s\n", fc_rport_state(rdata));
@@ -917,7 +1013,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
         }
 
         if (IS_ERR(fp)) {
-                fc_rport_error_retry(rdata, fp);
+                fc_rport_error_retry(rdata, PTR_ERR(fp));
                 goto err;
         }
 
@@ -939,14 +1035,20 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
                 rdata->max_seq = csp_seq;
                 rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
                 fc_rport_enter_prli(rdata);
-        } else
-                fc_rport_error_retry(rdata, fp);
+        } else {
+                struct fc_els_ls_rjt *rjt;
 
+                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+                FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+                             rjt->er_reason, rjt->er_explan);
+                fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
+        }
 out:
         fc_frame_free(fp);
 err:
         mutex_unlock(&rdata->rp_mutex);
-        kref_put(&rdata->kref, lport->tt.rport_destroy);
+put:
+        kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 static bool
@@ -969,6 +1071,8 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
 {
@@ -990,17 +1094,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
         fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
         if (!fp) {
                 FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
-                fc_rport_error_retry(rdata, fp);
+                fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
                 return;
         }
         rdata->e_d_tov = lport->e_d_tov;
 
+        kref_get(&rdata->kref);
         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
                                   fc_rport_plogi_resp, rdata,
-                                  2 * lport->r_a_tov))
-                fc_rport_error_retry(rdata, NULL);
-        else
-                kref_get(&rdata->kref);
+                                  2 * lport->r_a_tov)) {
+                fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+                kref_put(&rdata->kref, fc_rport_destroy);
+        }
 }
 
 /**
@@ -1022,16 +1127,20 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
                 struct fc_els_spp spp;
         } *pp;
         struct fc_els_spp temp_spp;
+        struct fc_els_ls_rjt *rjt;
         struct fc4_prov *prov;
         u32 roles = FC_RPORT_ROLE_UNKNOWN;
         u32 fcp_parm = 0;
         u8 op;
-        u8 resp_code = 0;
-
-        mutex_lock(&rdata->rp_mutex);
+        enum fc_els_spp_resp resp_code;
 
         FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
 
+        if (fp == ERR_PTR(-FC_EX_CLOSED))
+                goto put;
+
+        mutex_lock(&rdata->rp_mutex);
+
         if (rdata->rp_state != RPORT_ST_PRLI) {
                 FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
                              "%s\n", fc_rport_state(rdata));
@@ -1041,7 +1150,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
         }
 
         if (IS_ERR(fp)) {
-                fc_rport_error_retry(rdata, fp);
+                fc_rport_error_retry(rdata, PTR_ERR(fp));
                 goto err;
         }
 
@@ -1055,14 +1164,14 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
                 goto out;
 
         resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
-        FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
-                     pp->spp.spp_flags);
+        FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
+                     pp->spp.spp_flags, pp->spp.spp_type);
         rdata->spp_type = pp->spp.spp_type;
         if (resp_code != FC_SPP_RESP_ACK) {
                 if (resp_code == FC_SPP_RESP_CONF)
-                        fc_rport_error(rdata, fp);
+                        fc_rport_error(rdata, -FC_EX_SEQ_ERR);
                 else
-                        fc_rport_error_retry(rdata, fp);
+                        fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
                 goto out;
         }
         if (pp->prli.prli_spp_len < sizeof(pp->spp))
@@ -1074,13 +1183,25 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
                 if (fcp_parm & FCP_SPPF_CONF_COMPL)
                         rdata->flags |= FC_RP_FLAGS_CONF_REQ;
 
-                prov = fc_passive_prov[FC_TYPE_FCP];
+                /*
+                 * Call prli provider if we should act as a target
+                 */
+                prov = fc_passive_prov[rdata->spp_type];
                 if (prov) {
                         memset(&temp_spp, 0, sizeof(temp_spp));
                         prov->prli(rdata, pp->prli.prli_spp_len,
                                    &pp->spp, &temp_spp);
                 }
-
+                /*
+                 * Check if the image pair could be established
+                 */
+                if (rdata->spp_type != FC_TYPE_FCP ||
+                    !(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) {
+                        /*
+                         * Nope; we can't use this port as a target.
+                         */
+                        fcp_parm &= ~FCP_SPPF_TARG_FCN;
+                }
                 rdata->supported_classes = FC_COS_CLASS3;
                 if (fcp_parm & FCP_SPPF_INIT_FCN)
                         roles |= FC_RPORT_ROLE_FCP_INITIATOR;
@@ -1091,15 +1212,18 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
                 fc_rport_enter_rtv(rdata);
 
         } else {
-                FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
-                fc_rport_error_retry(rdata, fp);
+                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+                FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+                             rjt->er_reason, rjt->er_explan);
+                fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
         }
 
 out:
         fc_frame_free(fp);
 err:
         mutex_unlock(&rdata->rp_mutex);
-        kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+        kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -1108,6 +1232,8 @@ err:
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
 {
@@ -1128,6 +1254,15 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
                 return;
         }
 
+        /*
+         * And if the local port does not support the initiator function
+         * there's no need to send a PRLI, either.
+         */
+        if (!(lport->service_params & FCP_SPPF_INIT_FCN)) {
+                fc_rport_enter_ready(rdata);
+                return;
+        }
+
         FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
                      fc_rport_state(rdata));
 
@@ -1135,7 +1270,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
 
         fp = fc_frame_alloc(lport, sizeof(*pp));
         if (!fp) {
-                fc_rport_error_retry(rdata, fp);
+                fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
                 return;
         }
 
@@ -1151,15 +1286,16 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
                        fc_host_port_id(lport->host), FC_TYPE_ELS,
                        FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 
-        if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
-                                     NULL, rdata, 2 * lport->r_a_tov))
-                fc_rport_error_retry(rdata, NULL);
-        else
-                kref_get(&rdata->kref);
+        kref_get(&rdata->kref);
+        if (!fc_exch_seq_send(lport, fp, fc_rport_prli_resp,
+                              NULL, rdata, 2 * lport->r_a_tov)) {
+                fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+                kref_put(&rdata->kref, fc_rport_destroy);
+        }
 }
 
 /**
- * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses
+ * fc_rport_rtv_resp() - Handler for Request Timeout Value (RTV) responses
  * @sp: The sequence the RTV was on
  * @fp: The RTV response frame
  * @rdata_arg: The remote port that sent the RTV response
@@ -1176,10 +1312,13 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
         struct fc_rport_priv *rdata = rdata_arg;
         u8 op;
 
-        mutex_lock(&rdata->rp_mutex);
-
         FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
 
+        if (fp == ERR_PTR(-FC_EX_CLOSED))
+                goto put;
+
+        mutex_lock(&rdata->rp_mutex);
+
         if (rdata->rp_state != RPORT_ST_RTV) {
                 FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
                              "%s\n", fc_rport_state(rdata));
@@ -1189,7 +1328,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
         }
 
         if (IS_ERR(fp)) {
-                fc_rport_error(rdata, fp);
+                fc_rport_error(rdata, PTR_ERR(fp));
                 goto err;
         }
 
@@ -1205,13 +1344,15 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
                         tov = ntohl(rtv->rtv_r_a_tov);
                         if (tov == 0)
                                 tov = 1;
-                        rdata->r_a_tov = tov;
+                        if (tov > rdata->r_a_tov)
+                                rdata->r_a_tov = tov;
                         tov = ntohl(rtv->rtv_e_d_tov);
                         if (toq & FC_ELS_RTV_EDRES)
                                 tov /= 1000000;
                         if (tov == 0)
                                 tov = 1;
-                        rdata->e_d_tov = tov;
+                        if (tov > rdata->e_d_tov)
+                                rdata->e_d_tov = tov;
                 }
         }
 
@@ -1221,7 +1362,8 @@ out:
         fc_frame_free(fp);
 err:
         mutex_unlock(&rdata->rp_mutex);
-        kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+        kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -1230,6 +1372,8 @@ err:
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
 {
@@ -1243,16 +1387,52 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
 
         fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
         if (!fp) {
-                fc_rport_error_retry(rdata, fp);
+                fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
                 return;
         }
 
+        kref_get(&rdata->kref);
         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
                                   fc_rport_rtv_resp, rdata,
-                                  2 * lport->r_a_tov))
-                fc_rport_error_retry(rdata, NULL);
-        else
-                kref_get(&rdata->kref);
+                                  2 * lport->r_a_tov)) {
+                fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+                kref_put(&rdata->kref, fc_rport_destroy);
+        }
+}
+
+/**
+ * fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
+ * @rdata: The remote port that sent the RTV request
+ * @in_fp: The RTV request frame
+ *
+ * Locking Note: Called with the lport and rport locks held.
+ */
+static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
+                                  struct fc_frame *in_fp)
+{
+        struct fc_lport *lport = rdata->local_port;
+        struct fc_frame *fp;
+        struct fc_els_rtv_acc *rtv;
+        struct fc_seq_els_data rjt_data;
+
+        FC_RPORT_DBG(rdata, "Received RTV request\n");
+
+        fp = fc_frame_alloc(lport, sizeof(*rtv));
+        if (!fp) {
+                rjt_data.reason = ELS_RJT_UNAB;
+                rjt_data.explan = ELS_EXPL_INSUF_RES;
+                fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+                goto drop;
+        }
+        rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+        rtv->rtv_cmd = ELS_LS_ACC;
+        rtv->rtv_r_a_tov = htonl(lport->r_a_tov);
+        rtv->rtv_e_d_tov = htonl(lport->e_d_tov);
+        rtv->rtv_toq = 0;
+        fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+        lport->tt.frame_send(lport, fp);
+drop:
+        fc_frame_free(in_fp);
 }
 
 /**
@@ -1262,15 +1442,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
  * @lport_arg: The local port
  */
 static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
-                               void *lport_arg)
+                               void *rdata_arg)
 {
-        struct fc_lport *lport = lport_arg;
+        struct fc_rport_priv *rdata = rdata_arg;
+        struct fc_lport *lport = rdata->local_port;
 
         FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
                         "Received a LOGO %s\n", fc_els_resp_type(fp));
-        if (IS_ERR(fp))
-                return;
-        fc_frame_free(fp);
+        if (!IS_ERR(fp))
+                fc_frame_free(fp);
+        kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -1279,6 +1460,8 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
 {
@@ -1291,8 +1474,10 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
1291 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); 1474 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
1292 if (!fp) 1475 if (!fp)
1293 return; 1476 return;
1294 (void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, 1477 kref_get(&rdata->kref);
1295 fc_rport_logo_resp, lport, 0); 1478 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
1479 fc_rport_logo_resp, rdata, 0))
1480 kref_put(&rdata->kref, fc_rport_destroy);
1296} 1481}
1297 1482
1298/** 1483/**
@@ -1312,10 +1497,13 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
1312 struct fc_els_adisc *adisc; 1497 struct fc_els_adisc *adisc;
1313 u8 op; 1498 u8 op;
1314 1499
1315 mutex_lock(&rdata->rp_mutex);
1316
1317 FC_RPORT_DBG(rdata, "Received a ADISC response\n"); 1500 FC_RPORT_DBG(rdata, "Received a ADISC response\n");
1318 1501
1502 if (fp == ERR_PTR(-FC_EX_CLOSED))
1503 goto put;
1504
1505 mutex_lock(&rdata->rp_mutex);
1506
1319 if (rdata->rp_state != RPORT_ST_ADISC) { 1507 if (rdata->rp_state != RPORT_ST_ADISC) {
1320 FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n", 1508 FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
1321 fc_rport_state(rdata)); 1509 fc_rport_state(rdata));
@@ -1325,7 +1513,7 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
1325 } 1513 }
1326 1514
1327 if (IS_ERR(fp)) { 1515 if (IS_ERR(fp)) {
1328 fc_rport_error(rdata, fp); 1516 fc_rport_error(rdata, PTR_ERR(fp));
1329 goto err; 1517 goto err;
1330 } 1518 }
1331 1519
@@ -1350,7 +1538,8 @@ out:
1350 fc_frame_free(fp); 1538 fc_frame_free(fp);
1351err: 1539err:
1352 mutex_unlock(&rdata->rp_mutex); 1540 mutex_unlock(&rdata->rp_mutex);
1353 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); 1541put:
1542 kref_put(&rdata->kref, fc_rport_destroy);
1354} 1543}
1355 1544
1356/** 1545/**
@@ -1359,6 +1548,8 @@ err:
1359 * 1548 *
1360 * Locking Note: The rport lock is expected to be held before calling 1549 * Locking Note: The rport lock is expected to be held before calling
1361 * this routine. 1550 * this routine.
1551 *
1552 * Reference counting: increments kref when sending ELS
1362 */ 1553 */
1363static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) 1554static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
1364{ 1555{
@@ -1372,15 +1563,16 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
1372 1563
1373 fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc)); 1564 fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
1374 if (!fp) { 1565 if (!fp) {
1375 fc_rport_error_retry(rdata, fp); 1566 fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
1376 return; 1567 return;
1377 } 1568 }
1569 kref_get(&rdata->kref);
1378 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, 1570 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
1379 fc_rport_adisc_resp, rdata, 1571 fc_rport_adisc_resp, rdata,
1380 2 * lport->r_a_tov)) 1572 2 * lport->r_a_tov)) {
1381 fc_rport_error_retry(rdata, NULL); 1573 fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
1382 else 1574 kref_put(&rdata->kref, fc_rport_destroy);
1383 kref_get(&rdata->kref); 1575 }
1384} 1576}
1385 1577
1386/** 1578/**
@@ -1404,7 +1596,7 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
        if (!adisc) {
                rjt_data.reason = ELS_RJT_PROT;
                rjt_data.explan = ELS_EXPL_INV_LEN;
-               lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+               fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
                goto drop;
        }
 
@@ -1480,7 +1672,7 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
        goto out;
 
 out_rjt:
-       lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+       fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
 out:
        fc_frame_free(rx_fp);
 }
@@ -1494,15 +1686,21 @@ out:
  * The ELS opcode has already been validated by the caller.
  *
  * Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
  */
 static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
 {
        struct fc_rport_priv *rdata;
        struct fc_seq_els_data els_data;
 
-       rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
-       if (!rdata)
+       rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
+       if (!rdata) {
+               FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
+                               "Received ELS 0x%02x from non-logged-in port\n",
+                               fc_frame_payload_op(fp));
                goto reject;
+       }
 
        mutex_lock(&rdata->rp_mutex);
 
@@ -1512,9 +1710,21 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
        case RPORT_ST_READY:
        case RPORT_ST_ADISC:
                break;
+       case RPORT_ST_PLOGI:
+               if (fc_frame_payload_op(fp) == ELS_PRLI) {
+                       FC_RPORT_DBG(rdata, "Reject ELS PRLI "
+                                    "while in state %s\n",
+                                    fc_rport_state(rdata));
+                       mutex_unlock(&rdata->rp_mutex);
+                       kref_put(&rdata->kref, fc_rport_destroy);
+                       goto busy;
+               }
        default:
+               FC_RPORT_DBG(rdata,
+                            "Reject ELS 0x%02x while in state %s\n",
+                            fc_frame_payload_op(fp), fc_rport_state(rdata));
                mutex_unlock(&rdata->rp_mutex);
-               kref_put(&rdata->kref, lport->tt.rport_destroy);
+               kref_put(&rdata->kref, fc_rport_destroy);
                goto reject;
        }
 
@@ -1529,30 +1739,41 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
                fc_rport_recv_adisc_req(rdata, fp);
                break;
        case ELS_RRQ:
-               lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL);
+               fc_seq_els_rsp_send(fp, ELS_RRQ, NULL);
                fc_frame_free(fp);
                break;
        case ELS_REC:
-               lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL);
+               fc_seq_els_rsp_send(fp, ELS_REC, NULL);
                fc_frame_free(fp);
                break;
        case ELS_RLS:
                fc_rport_recv_rls_req(rdata, fp);
                break;
+       case ELS_RTV:
+               fc_rport_recv_rtv_req(rdata, fp);
+               break;
        default:
                fc_frame_free(fp);      /* can't happen */
                break;
        }
 
        mutex_unlock(&rdata->rp_mutex);
-       kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+       kref_put(&rdata->kref, fc_rport_destroy);
        return;
 
 reject:
        els_data.reason = ELS_RJT_UNAB;
        els_data.explan = ELS_EXPL_PLOGI_REQD;
-       lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+       fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+       fc_frame_free(fp);
+       return;
+
+busy:
+       els_data.reason = ELS_RJT_BUSY;
+       els_data.explan = ELS_EXPL_NONE;
+       fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
        fc_frame_free(fp);
+       return;
 }
 
 /**
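The dispatcher now distinguishes two LS_RJT flavors: reason "unable, PLOGI required" for frames from unknown senders, and reason "busy" for a PRLI that races an in-progress PLOGI. A small sketch of that selection logic (enum values made up for illustration; the real codes live in fc_els.h):

/* Illustrative sketch of choosing an LS_RJT reason/explanation pair;
 * the enum values here are made up, only the shape mirrors the code. */
#include <stdio.h>

enum rjt_reason { RJT_NONE = 0, RJT_UNAB = 1, RJT_BUSY = 2 };
enum rjt_explan { EXPL_NONE = 0, EXPL_PLOGI_REQD = 1 };

struct rjt { enum rjt_reason reason; enum rjt_explan explan; };

static struct rjt pick_reject(int logged_in, int login_in_progress)
{
        if (!logged_in)
                return (struct rjt){ RJT_UNAB, EXPL_PLOGI_REQD };
        if (login_in_progress)
                return (struct rjt){ RJT_BUSY, EXPL_NONE };
        return (struct rjt){ RJT_NONE, EXPL_NONE }; /* no reject: handle it */
}

int main(void)
{
        struct rjt r = pick_reject(0, 0);

        printf("reason=%d explan=%d\n", r.reason, r.explan);
        return 0;
}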
@@ -1561,8 +1782,10 @@ reject:
  * @fp: The request frame
  *
  * Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
  */
-static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
 {
        struct fc_seq_els_data els_data;
 
@@ -1588,16 +1811,18 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
        case ELS_RRQ:
        case ELS_REC:
        case ELS_RLS:
+       case ELS_RTV:
                fc_rport_recv_els_req(lport, fp);
                break;
        default:
                els_data.reason = ELS_RJT_UNSUP;
                els_data.explan = ELS_EXPL_NONE;
-               lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+               fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
                fc_frame_free(fp);
                break;
        }
 }
+EXPORT_SYMBOL(fc_rport_recv_req);
 
 /**
  * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
@@ -1605,6 +1830,8 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
  * @rx_fp: The PLOGI request frame
  *
  * Locking Note: The rport lock is held before calling this function.
+ *
+ * Reference counting: increments kref on return
  */
 static void fc_rport_recv_plogi_req(struct fc_lport *lport,
                                    struct fc_frame *rx_fp)
@@ -1630,7 +1857,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
 
        disc = &lport->disc;
        mutex_lock(&disc->disc_mutex);
-       rdata = lport->tt.rport_create(lport, sid);
+       rdata = fc_rport_create(lport, sid);
        if (!rdata) {
                mutex_unlock(&disc->disc_mutex);
                rjt_data.reason = ELS_RJT_UNAB;
@@ -1718,7 +1945,7 @@ out:
        return;
 
 reject:
-       lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+       fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
        fc_frame_free(fp);
 }
 
@@ -1744,7 +1971,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
        unsigned int len;
        unsigned int plen;
        enum fc_els_spp_resp resp;
-       enum fc_els_spp_resp passive;
        struct fc_seq_els_data rjt_data;
        struct fc4_prov *prov;
 
@@ -1794,15 +2020,21 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
                resp = 0;
 
                if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+                       enum fc_els_spp_resp active = 0, passive = 0;
+
                        prov = fc_active_prov[rspp->spp_type];
                        if (prov)
-                               resp = prov->prli(rdata, plen, rspp, spp);
+                               active = prov->prli(rdata, plen, rspp, spp);
                        prov = fc_passive_prov[rspp->spp_type];
-                       if (prov) {
-                               passive = prov->prli(rdata, plen, rspp, spp);
-                               if (!resp || passive == FC_SPP_RESP_ACK)
-                                       resp = passive;
-                       }
+                       if (prov)
+                               passive = prov->prli(rdata, plen, rspp, spp);
+                       if (!active || passive == FC_SPP_RESP_ACK)
+                               resp = passive;
+                       else
+                               resp = active;
+                       FC_RPORT_DBG(rdata, "PRLI rspp type %x "
+                                    "active %x passive %x\n",
+                                    rspp->spp_type, active, passive);
                }
                if (!resp) {
                        if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
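The reworked provider loop resolves one response from two independently registered FC-4 providers: the passive answer wins when the active provider abstained or the passive one ACKed, otherwise the active answer stands. A compact stand-alone model of that precedence rule (values illustrative, not the fc_els.h enum):

/* Stand-alone model of the active/passive PRLI precedence above;
 * the values are illustrative, not the fc_els.h enum. */
#include <stdio.h>

enum spp_resp { SPP_NONE = 0, SPP_ACK = 1, SPP_RESV = 2 };

static enum spp_resp resolve_prli(enum spp_resp active, enum spp_resp passive)
{
        /* passive wins when active abstained or passive ACKed */
        if (!active || passive == SPP_ACK)
                return passive;
        return active;
}

int main(void)
{
        printf("%d\n", resolve_prli(SPP_NONE, SPP_RESV));       /* 2: passive */
        printf("%d\n", resolve_prli(SPP_RESV, SPP_ACK));        /* 1: passive ACK */
        printf("%d\n", resolve_prli(SPP_RESV, SPP_NONE));       /* 2: active */
        return 0;
}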
@@ -1823,20 +2055,13 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
        fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
        lport->tt.frame_send(lport, fp);
 
-       switch (rdata->rp_state) {
-       case RPORT_ST_PRLI:
-               fc_rport_enter_ready(rdata);
-               break;
-       default:
-               break;
-       }
        goto drop;
 
 reject_len:
        rjt_data.reason = ELS_RJT_PROT;
        rjt_data.explan = ELS_EXPL_INV_LEN;
 reject:
-       lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+       fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
 drop:
        fc_frame_free(rx_fp);
 }
@@ -1907,7 +2132,7 @@ reject_len:
        rjt_data.reason = ELS_RJT_PROT;
        rjt_data.explan = ELS_EXPL_INV_LEN;
 reject:
-       lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+       fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
 drop:
        fc_frame_free(rx_fp);
 }
@@ -1919,17 +2144,19 @@ drop:
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this function.
+ *
+ * Reference counting: drops kref on return
  */
 static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 {
        struct fc_rport_priv *rdata;
        u32 sid;
 
-       lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+       fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
 
        sid = fc_frame_sid(fp);
 
-       rdata = lport->tt.rport_lookup(lport, sid);
+       rdata = fc_rport_lookup(lport, sid);
        if (rdata) {
                mutex_lock(&rdata->rp_mutex);
                FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
@@ -1937,7 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 
                fc_rport_enter_delete(rdata, RPORT_EV_STOP);
                mutex_unlock(&rdata->rp_mutex);
-               kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+               kref_put(&rdata->kref, fc_rport_destroy);
        } else
                FC_RPORT_ID_DBG(lport, sid,
                                "Received LOGO from non-logged-in port\n");
@@ -1947,41 +2174,11 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 /**
  * fc_rport_flush_queue() - Flush the rport_event_queue
  */
-static void fc_rport_flush_queue(void)
+void fc_rport_flush_queue(void)
 {
        flush_workqueue(rport_event_queue);
 }
-
-/**
- * fc_rport_init() - Initialize the remote port layer for a local port
- * @lport: The local port to initialize the remote port layer for
- */
-int fc_rport_init(struct fc_lport *lport)
-{
-       if (!lport->tt.rport_lookup)
-               lport->tt.rport_lookup = fc_rport_lookup;
-
-       if (!lport->tt.rport_create)
-               lport->tt.rport_create = fc_rport_create;
-
-       if (!lport->tt.rport_login)
-               lport->tt.rport_login = fc_rport_login;
-
-       if (!lport->tt.rport_logoff)
-               lport->tt.rport_logoff = fc_rport_logoff;
-
-       if (!lport->tt.rport_recv_req)
-               lport->tt.rport_recv_req = fc_rport_recv_req;
-
-       if (!lport->tt.rport_flush_queue)
-               lport->tt.rport_flush_queue = fc_rport_flush_queue;
-
-       if (!lport->tt.rport_destroy)
-               lport->tt.rport_destroy = fc_rport_destroy;
-
-       return 0;
-}
-EXPORT_SYMBOL(fc_rport_init);
+EXPORT_SYMBOL(fc_rport_flush_queue);
 
 /**
  * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
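The fc_rport_init() deletion above is the shape of the whole libfc cleanup in this series: the function-pointer template (lport->tt.*) gives way to plain exported entry points such as fc_rport_recv_req() and fc_rport_flush_queue(). A stand-alone sketch of the before/after calling convention (illustrative names, not kernel code):

/* Before/after shape of dropping a function-pointer template in favor
 * of direct exported calls; all names here are illustrative. */
#include <stdio.h>

static void rport_flush(void) { puts("flush"); }

/* Before: an ops table that an init routine had to populate. */
struct template {
        void (*rport_flush)(void);
};

static void template_init(struct template *tt)
{
        if (!tt->rport_flush)
                tt->rport_flush = rport_flush;
}

int main(void)
{
        struct template tt = { 0 };

        template_init(&tt);
        tt.rport_flush();       /* before: one pointer hop per call */
        rport_flush();          /* after: direct call, no init step */
        return 0;
}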
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b484859464f6..8a20b4e86224 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -648,6 +648,10 @@ struct lpfc_hba {
 #define HBA_FCP_IOQ_FLUSH      0x8000 /* FCP I/O queues being flushed */
 #define HBA_FW_DUMP_OP         0x10000 /* Skips fn reset before FW dump */
 #define HBA_RECOVERABLE_UE     0x20000 /* Firmware supports recoverable UE */
+#define HBA_FORCED_LINK_SPEED  0x40000 /*
+                                        * Firmware supports Forced Link Speed
+                                        * capability
+                                        */
        uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
        struct lpfc_dmabuf slim2p;
 
@@ -746,6 +750,8 @@ struct lpfc_hba {
        uint32_t cfg_oas_priority;
        uint32_t cfg_XLanePriority;
        uint32_t cfg_enable_bg;
+       uint32_t cfg_prot_mask;
+       uint32_t cfg_prot_guard;
        uint32_t cfg_hostmem_hgp;
        uint32_t cfg_log_verbose;
        uint32_t cfg_aer_support;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f1019908800e..c84775562c65 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2759,18 +2759,14 @@ LPFC_ATTR_R(enable_npiv, 1, 0, 1,
 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
        "FCF Fast failover=1 Priority failover=2");
 
-int lpfc_enable_rrq = 2;
-module_param(lpfc_enable_rrq, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
-lpfc_param_show(enable_rrq);
 /*
 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
 # 0x0 = disabled, XRI/OXID use not tracked.
 # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
 # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
 */
-lpfc_param_init(enable_rrq, 2, 0, 2);
-static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
+LPFC_ATTR_R(enable_rrq, 2, 0, 2,
+       "Enable RRQ functionality");
 
 /*
 # lpfc_suppress_link_up: Bring link up at initialization
@@ -2827,14 +2823,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
                   lpfc_txcmplq_hw_show, NULL);
 
-int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_iocb_cnt,
+LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
        "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-lpfc_param_show(iocb_cnt);
-lpfc_param_init(iocb_cnt, 2, 1, 5);
-static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
-                  lpfc_iocb_cnt_show, NULL);
 
 /*
 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
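Each conversion in lpfc_attr.c replaces five or six hand-rolled lines per parameter (variable, module_param, MODULE_PARM_DESC, show, init) with a single LPFC_ATTR_R()/LPFC_ATTR() invocation. A hypothetical, much-simplified expansion of such a macro to show what it bundles (not the real lpfc macro, which also emits the sysfs show handler and DEVICE_ATTR wiring):

/* Hypothetical, simplified version of an LPFC_ATTR_R-style macro; the
 * real lpfc macro additionally generates the sysfs show handler. */
#include <stdio.h>

#define ATTR_R(name, defval, minval, maxval, desc)                     \
        static int opt_##name = (defval);                              \
        static int name##_init(int val)                                \
        {                                                              \
                if (val >= (minval) && val <= (maxval)) {              \
                        opt_##name = val;                              \
                        return 0;                                      \
                }                                                      \
                fprintf(stderr, #name ": %d out of range [%d,%d]\n",   \
                        val, (minval), (maxval));                      \
                return -1;                                             \
        }

ATTR_R(iocb_cnt, 2, 1, 5, "Number of IOCBs")

int main(void)
{
        iocb_cnt_init(9);       /* rejected: out of range, default kept */
        iocb_cnt_init(3);       /* accepted */
        printf("iocb_cnt=%d\n", opt_iocb_cnt);
        return 0;
}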
@@ -2887,9 +2877,9 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
                vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
                if (val != LPFC_DEF_DEVLOSS_TMO)
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-                                        "0407 Ignoring nodev_tmo module "
-                                        "parameter because devloss_tmo is "
-                                        "set.\n");
+                                        "0407 Ignoring lpfc_nodev_tmo module "
+                                        "parameter because lpfc_devloss_tmo "
+                                        "is set.\n");
                return 0;
        }
 
@@ -2948,8 +2938,8 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
        if (vport->dev_loss_tmo_changed ||
            (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-                                "0401 Ignoring change to nodev_tmo "
-                                "because devloss_tmo is set.\n");
+                                "0401 Ignoring change to lpfc_nodev_tmo "
+                                "because lpfc_devloss_tmo is set.\n");
                return 0;
        }
        if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
@@ -2964,7 +2954,7 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
                return 0;
        }
        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-                        "0403 lpfc_nodev_tmo attribute cannot be set to"
+                        "0403 lpfc_nodev_tmo attribute cannot be set to "
                         "%d, allowed range is [%d, %d]\n",
                         val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
        return -EINVAL;
@@ -3015,8 +3005,8 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
        }
 
        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-                        "0404 lpfc_devloss_tmo attribute cannot be set to"
-                        " %d, allowed range is [%d, %d]\n",
+                        "0404 lpfc_devloss_tmo attribute cannot be set to "
+                        "%d, allowed range is [%d, %d]\n",
                         val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
        return -EINVAL;
 }
@@ -3204,6 +3194,8 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
 # Default value is 0.
 */
+LPFC_ATTR(topology, 0, 0, 6,
+       "Select Fibre Channel topology");
 
 /**
  * lpfc_topology_set - Set the adapters topology field
@@ -3281,11 +3273,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
                        phba->brd_no, val);
        return -EINVAL;
 }
-static int lpfc_topology = 0;
-module_param(lpfc_topology, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
+
 lpfc_param_show(topology)
-lpfc_param_init(topology, 0, 0, 6)
 static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
                lpfc_topology_show, lpfc_topology_store);
 
@@ -3679,7 +3668,12 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
        int nolip = 0;
        const char *val_buf = buf;
        int err;
-       uint32_t prev_val;
+       uint32_t prev_val, if_type;
+
+       if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+       if (if_type == LPFC_SLI_INTF_IF_TYPE_2 &&
+           phba->hba_flag & HBA_FORCED_LINK_SPEED)
+               return -EPERM;
 
        if (!strncmp(buf, "nolip ", strlen("nolip "))) {
                nolip = 1;
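The new guard makes the sysfs store refuse writes outright when firmware has forced the link speed, by testing the HBA_FORCED_LINK_SPEED bit added to lpfc.h above and bailing out with -EPERM. A stand-alone model of that flag-gated store (only the flag value mirrors the header; the rest is illustrative):

/* Stand-alone model of a flag-gated store handler; only the flag
 * value mirrors the lpfc.h hunk above, the rest is illustrative. */
#include <stdio.h>
#include <errno.h>

#define HBA_FORCED_LINK_SPEED  0x40000

struct hba {
        unsigned int hba_flag;
        int link_speed;
};

static int link_speed_store(struct hba *h, int val)
{
        if (h->hba_flag & HBA_FORCED_LINK_SPEED)
                return -EPERM;  /* firmware owns the setting */
        h->link_speed = val;
        return 0;
}

int main(void)
{
        struct hba h = { .hba_flag = HBA_FORCED_LINK_SPEED };

        printf("%d\n", link_speed_store(&h, 16));       /* -EPERM */
        h.hba_flag = 0;
        printf("%d\n", link_speed_store(&h, 16));       /* 0: accepted */
        return 0;
}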
@@ -3789,6 +3783,9 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
 # 1  = aer supported and enabled (default)
 # Value range is [0,1]. Default value is 1.
 */
+LPFC_ATTR(aer_support, 1, 0, 1,
+       "Enable PCIe device AER support");
+lpfc_param_show(aer_support)
 
 /**
  * lpfc_aer_support_store - Set the adapter for aer support
@@ -3871,46 +3868,6 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
        return rc;
 }
 
-static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
-lpfc_param_show(aer_support)
-
-/**
- * lpfc_aer_support_init - Set the initial adapters aer support flag
- * @phba: lpfc_hba pointer.
- * @val: enable aer or disable aer flag.
- *
- * Description:
- * If val is in a valid range [0,1], then set the adapter's initial
- * cfg_aer_support field. It will be up to the driver's probe_one
- * routine to determine whether the device's AER support can be set
- * or not.
- *
- * Notes:
- * If the value is not in range log a kernel error message, and
- * choose the default value of setting AER support and return.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_aer_support_init(struct lpfc_hba *phba, int val)
-{
-       if (val == 0 || val == 1) {
-               phba->cfg_aer_support = val;
-               return 0;
-       }
-       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "2712 lpfc_aer_support attribute value %d out "
-                       "of range, allowed values are 0|1, setting it "
-                       "to default value of 1\n", val);
-       /* By default, try to enable AER on a device */
-       phba->cfg_aer_support = 1;
-       return -EINVAL;
-}
-
 static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
                   lpfc_aer_support_show, lpfc_aer_support_store);
 
@@ -4055,39 +4012,10 @@ lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
        return rc;
 }
 
-static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
-module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
-lpfc_param_show(sriov_nr_virtfn)
-
-/**
- * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
- * @phba: lpfc_hba pointer.
- * @val: link speed value.
- *
- * Description:
- * If val is in a valid range [0,255], then set the adapter's initial
- * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
- * number shall be used instead. It will be up to the driver's probe_one
- * routine to determine whether the device's SR-IOV is supported or not.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
-{
-       if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
-               phba->cfg_sriov_nr_virtfn = val;
-               return 0;
-       }
+LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
+       "Enable PCIe device SR-IOV virtual fn");
 
-       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "3017 Enabling %d virtual functions is not "
-                       "allowed.\n", val);
-       return -EINVAL;
-}
+lpfc_param_show(sriov_nr_virtfn)
 static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
                   lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
 
@@ -4251,7 +4179,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
        }
 
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "3016 fcp_imax: %d out of range, using default\n", val);
+                       "3016 lpfc_fcp_imax: %d out of range, using default\n",
+                       val);
        phba->cfg_fcp_imax = LPFC_DEF_IMAX;
 
        return 0;
@@ -4401,8 +4330,8 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
        }
 
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "3326 fcp_cpu_map: %d out of range, using default\n",
-                       val);
+                       "3326 lpfc_fcp_cpu_map: %d out of range, using "
+                       "default\n", val);
        phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
 
        return 0;
@@ -4441,12 +4370,10 @@ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
 # to limit the I/O completion time to the parameter value.
 # The value is set in milliseconds.
 */
-static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
        "Use command completion time to control queue depth");
+
 lpfc_vport_param_show(max_scsicmpl_time);
-lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
 static int
 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
 {
@@ -4691,12 +4618,15 @@ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
 # HBA supports DIX Type 1: Host to HBA Type 1 protection
 #
 */
-unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
-                             SHOST_DIX_TYPE0_PROTECTION |
-                             SHOST_DIX_TYPE1_PROTECTION;
-
-module_param(lpfc_prot_mask, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
+LPFC_ATTR(prot_mask,
+       (SHOST_DIF_TYPE1_PROTECTION |
+        SHOST_DIX_TYPE0_PROTECTION |
+        SHOST_DIX_TYPE1_PROTECTION),
+       0,
+       (SHOST_DIF_TYPE1_PROTECTION |
+        SHOST_DIX_TYPE0_PROTECTION |
+        SHOST_DIX_TYPE1_PROTECTION),
+       "T10-DIF host protection capabilities mask");
 
 /*
 # lpfc_prot_guard: i
@@ -4706,9 +4636,9 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
 # - Default will result in registering capabilities for all guard types
 #
 */
-unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+LPFC_ATTR(prot_guard,
+       SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
+       "T10-DIF host protection guard type");
 
 /*
  * Delay initial NPort discovery when Clean Address bit is cleared in
@@ -5828,6 +5758,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        phba->cfg_oas_flags = 0;
        phba->cfg_oas_priority = 0;
        lpfc_enable_bg_init(phba, lpfc_enable_bg);
+       lpfc_prot_mask_init(phba, lpfc_prot_mask);
+       lpfc_prot_guard_init(phba, lpfc_prot_guard);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->cfg_poll = 0;
        else
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 05dcc2abd541..7dca4d6a8883 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/list.h>
+#include <linux/bsg-lib.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -97,7 +98,7 @@ struct lpfc_bsg_menlo {
 #define TYPE_MENLO     4
 struct bsg_job_data {
        uint32_t type;
-       struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
+       struct bsg_job *set_job; /* job waiting for this iocb to finish */
        union {
                struct lpfc_bsg_event *evt;
                struct lpfc_bsg_iocb iocb;
@@ -211,7 +212,7 @@ lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
 
 static unsigned int
 lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
-                  struct fc_bsg_buffer *bsg_buffers,
+                  struct bsg_buffer *bsg_buffers,
                   unsigned int bytes_to_transfer, int to_buffers)
 {
 
@@ -297,7 +298,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
                           struct lpfc_iocbq *rspiocbq)
 {
        struct bsg_job_data *dd_data;
-       struct fc_bsg_job *job;
+       struct bsg_job *job;
+       struct fc_bsg_reply *bsg_reply;
        IOCB_t *rsp;
        struct lpfc_dmabuf *bmp, *cmp, *rmp;
        struct lpfc_nodelist *ndlp;
@@ -312,6 +314,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        job = dd_data->set_job;
        if (job) {
+               bsg_reply = job->reply;
                /* Prevent timeout handling from trying to abort job */
                job->dd_data = NULL;
        }
@@ -350,7 +353,7 @@
                        }
                } else {
                        rsp_size = rsp->un.genreq64.bdl.bdeSize;
-                       job->reply->reply_payload_rcv_len =
+                       bsg_reply->reply_payload_rcv_len =
                                lpfc_bsg_copy_data(rmp, &job->reply_payload,
                                                   rsp_size, 0);
                }
@@ -367,8 +370,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
        /* Complete the job if the job is still active */
 
        if (job) {
-               job->reply->result = rc;
-               job->job_done(job);
+               bsg_reply->result = rc;
+               bsg_job_done(job, bsg_reply->result,
+                              bsg_reply->reply_payload_rcv_len);
        }
        return;
 }
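The substitution running through this file is mechanical: struct fc_bsg_job becomes the transport-neutral struct bsg_job, the request/reply pointers are cached in locals up front, and job->job_done(job) becomes bsg_job_done(job, result, rcv_len). A simplified stand-alone model of the new completion shape (struct layouts here are illustrative, not the real <linux/bsg-lib.h>):

/* Simplified model of the bsg-lib completion convention used above;
 * struct layouts are illustrative, not the real <linux/bsg-lib.h>. */
#include <stdio.h>

struct fc_bsg_reply {
        int result;
        unsigned int reply_payload_rcv_len;
};

struct bsg_job {
        struct fc_bsg_reply *reply;
};

/* The bsg-lib core would complete the underlying block request here. */
static void bsg_job_done(struct bsg_job *job, int result,
                         unsigned int reply_payload_rcv_len)
{
        printf("done: result=%d rcv_len=%u\n", result, reply_payload_rcv_len);
}

static void complete_job(struct bsg_job *job, int rc, unsigned int len)
{
        struct fc_bsg_reply *bsg_reply = job->reply;    /* cached up front */

        bsg_reply->result = rc;
        bsg_reply->reply_payload_rcv_len = len;
        bsg_job_done(job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
}

int main(void)
{
        struct fc_bsg_reply reply = { 0 };
        struct bsg_job job = { .reply = &reply };

        complete_job(&job, 0, 64);
        return 0;
}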
@@ -378,12 +382,13 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
  * @job: fc_bsg_job to handle
  **/
 static int
-lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
 {
-       struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+       struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
        struct lpfc_hba *phba = vport->phba;
-       struct lpfc_rport_data *rdata = job->rport->dd_data;
+       struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
        struct lpfc_nodelist *ndlp = rdata->pnode;
+       struct fc_bsg_reply *bsg_reply = job->reply;
        struct ulp_bde64 *bpl = NULL;
        uint32_t timeout;
        struct lpfc_iocbq *cmdiocbq = NULL;
@@ -398,7 +403,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
        int iocb_stat;
 
        /* in case no data is transferred */
-       job->reply->reply_payload_rcv_len = 0;
+       bsg_reply->reply_payload_rcv_len = 0;
 
        /* allocate our bsg tracking structure */
        dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -542,7 +547,7 @@ no_ndlp:
        kfree(dd_data);
 no_dd_data:
        /* make error code available to userspace */
-       job->reply->result = rc;
+       bsg_reply->result = rc;
        job->dd_data = NULL;
        return rc;
 }
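Likewise, direct pokes such as job->shost->hostdata give way to accessor pairs (fc_bsg_to_shost() plus shost_priv(), and fc_bsg_to_rport()), since the generic bsg_job no longer embeds FC-specific pointers. A stand-alone sketch of the accessor idea (types made up for illustration, not the SCSI midlayer headers):

/* Illustrative sketch of accessor helpers hiding container layout;
 * the types are made up, not the kernel's scsi_host/bsg structures. */
#include <stdio.h>

struct scsi_host {
        void *hostdata;         /* driver-private area */
};

struct bsg_job {
        void *dev;              /* opaque: the transport knows what it is */
};

static struct scsi_host *fc_bsg_to_shost(struct bsg_job *job)
{
        return (struct scsi_host *)job->dev;
}

static void *shost_priv(struct scsi_host *shost)
{
        return shost->hostdata;
}

struct vport { int id; };

int main(void)
{
        struct vport v = { .id = 7 };
        struct scsi_host sh = { .hostdata = &v };
        struct bsg_job job = { .dev = &sh };

        /* old: (struct vport *)job->shost->hostdata  (layout exposed)
         * new: shost_priv(fc_bsg_to_shost(&job))     (layout hidden) */
        printf("vport id=%d\n",
               ((struct vport *)shost_priv(fc_bsg_to_shost(&job)))->id);
        return 0;
}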
@@ -570,7 +575,8 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
                       struct lpfc_iocbq *rspiocbq)
 {
        struct bsg_job_data *dd_data;
-       struct fc_bsg_job *job;
+       struct bsg_job *job;
+       struct fc_bsg_reply *bsg_reply;
        IOCB_t *rsp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
@@ -588,6 +594,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        job = dd_data->set_job;
        if (job) {
+               bsg_reply = job->reply;
                /* Prevent timeout handling from trying to abort job */
                job->dd_data = NULL;
        }
@@ -609,17 +616,17 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
        if (job) {
                if (rsp->ulpStatus == IOSTAT_SUCCESS) {
                        rsp_size = rsp->un.elsreq64.bdl.bdeSize;
-                       job->reply->reply_payload_rcv_len =
+                       bsg_reply->reply_payload_rcv_len =
                                sg_copy_from_buffer(job->reply_payload.sg_list,
                                                    job->reply_payload.sg_cnt,
                                                    prsp->virt,
                                                    rsp_size);
                } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
-                       job->reply->reply_payload_rcv_len =
+                       bsg_reply->reply_payload_rcv_len =
                                sizeof(struct fc_bsg_ctels_reply);
                        /* LS_RJT data returned in word 4 */
                        rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
-                       els_reply = &job->reply->reply_data.ctels_reply;
+                       els_reply = &bsg_reply->reply_data.ctels_reply;
                        els_reply->status = FC_CTELS_STATUS_REJECT;
                        els_reply->rjt_data.action = rjt_data[3];
                        els_reply->rjt_data.reason_code = rjt_data[2];
@@ -637,8 +644,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
        /* Complete the job if the job is still active */
 
        if (job) {
-               job->reply->result = rc;
-               job->job_done(job);
+               bsg_reply->result = rc;
+               bsg_job_done(job, bsg_reply->result,
+                              bsg_reply->reply_payload_rcv_len);
        }
        return;
 }
@@ -648,12 +656,14 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
  * @job: fc_bsg_job to handle
  **/
 static int
-lpfc_bsg_rport_els(struct fc_bsg_job *job)
+lpfc_bsg_rport_els(struct bsg_job *job)
 {
-       struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+       struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
        struct lpfc_hba *phba = vport->phba;
-       struct lpfc_rport_data *rdata = job->rport->dd_data;
+       struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
        struct lpfc_nodelist *ndlp = rdata->pnode;
+       struct fc_bsg_request *bsg_request = job->request;
+       struct fc_bsg_reply *bsg_reply = job->reply;
        uint32_t elscmd;
        uint32_t cmdsize;
        struct lpfc_iocbq *cmdiocbq;
@@ -664,7 +674,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
        int rc = 0;
 
        /* in case no data is transferred */
-       job->reply->reply_payload_rcv_len = 0;
+       bsg_reply->reply_payload_rcv_len = 0;
 
        /* verify the els command is not greater than the
         * maximum ELS transfer size.
@@ -684,7 +694,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
                goto no_dd_data;
        }
 
-       elscmd = job->request->rqst_data.r_els.els_code;
+       elscmd = bsg_request->rqst_data.r_els.els_code;
        cmdsize = job->request_payload.payload_len;
 
        if (!lpfc_nlp_get(ndlp)) {
@@ -771,7 +781,7 @@ free_dd_data:
 
 no_dd_data:
        /* make error code available to userspace */
-       job->reply->result = rc;
+       bsg_reply->result = rc;
        job->dd_data = NULL;
        return rc;
 }
@@ -917,7 +927,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
        struct lpfc_hbq_entry *hbqe;
        struct lpfc_sli_ct_request *ct_req;
-       struct fc_bsg_job *job = NULL;
+       struct bsg_job *job = NULL;
+       struct fc_bsg_reply *bsg_reply;
        struct bsg_job_data *dd_data = NULL;
        unsigned long flags;
        int size = 0;
@@ -1120,13 +1131,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                dd_data->set_job = NULL;
                lpfc_bsg_event_unref(evt);
                if (job) {
-                       job->reply->reply_payload_rcv_len = size;
+                       bsg_reply = job->reply;
+                       bsg_reply->reply_payload_rcv_len = size;
                        /* make error code available to userspace */
-                       job->reply->result = 0;
+                       bsg_reply->result = 0;
                        job->dd_data = NULL;
                        /* complete the job back to userspace */
                        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-                       job->job_done(job);
+                       bsg_job_done(job, bsg_reply->result,
+                                      bsg_reply->reply_payload_rcv_len);
                        spin_lock_irqsave(&phba->ct_ev_lock, flags);
                }
        }
@@ -1187,10 +1200,11 @@ lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
  * @job: SET_EVENT fc_bsg_job
  **/
 static int
-lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_set_event(struct bsg_job *job)
 {
-       struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+       struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
        struct lpfc_hba *phba = vport->phba;
+       struct fc_bsg_request *bsg_request = job->request;
        struct set_ct_event *event_req;
        struct lpfc_bsg_event *evt;
        int rc = 0;
@@ -1208,7 +1222,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
        }
 
        event_req = (struct set_ct_event *)
-               job->request->rqst_data.h_vendor.vendor_cmd;
+               bsg_request->rqst_data.h_vendor.vendor_cmd;
        ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
                   FC_REG_EVENT_MASK);
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1271,10 +1285,12 @@ job_error:
  * @job: GET_EVENT fc_bsg_job
  **/
 static int
-lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_get_event(struct bsg_job *job)
 {
-       struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+       struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
        struct lpfc_hba *phba = vport->phba;
+       struct fc_bsg_request *bsg_request = job->request;
+       struct fc_bsg_reply *bsg_reply = job->reply;
        struct get_ct_event *event_req;
        struct get_ct_event_reply *event_reply;
        struct lpfc_bsg_event *evt, *evt_next;
@@ -1292,10 +1308,10 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
        }
 
        event_req = (struct get_ct_event *)
-               job->request->rqst_data.h_vendor.vendor_cmd;
+               bsg_request->rqst_data.h_vendor.vendor_cmd;
 
        event_reply = (struct get_ct_event_reply *)
-               job->reply->reply_data.vendor_reply.vendor_rsp;
+               bsg_reply->reply_data.vendor_reply.vendor_rsp;
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
                if (evt->reg_id == event_req->ev_reg_id) {
@@ -1315,7 +1331,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
         * an error indicating that there isn't anymore
         */
        if (evt_dat == NULL) {
-               job->reply->reply_payload_rcv_len = 0;
+               bsg_reply->reply_payload_rcv_len = 0;
                rc = -ENOENT;
                goto job_error;
        }
@@ -1331,12 +1347,12 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
        event_reply->type = evt_dat->type;
        event_reply->immed_data = evt_dat->immed_dat;
        if (evt_dat->len > 0)
-               job->reply->reply_payload_rcv_len =
+               bsg_reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(job->request_payload.sg_list,
                                            job->request_payload.sg_cnt,
                                            evt_dat->data, evt_dat->len);
        else
-               job->reply->reply_payload_rcv_len = 0;
+               bsg_reply->reply_payload_rcv_len = 0;
 
        if (evt_dat) {
                kfree(evt_dat->data);
@@ -1347,13 +1363,14 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
        lpfc_bsg_event_unref(evt);
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
        job->dd_data = NULL;
-       job->reply->result = 0;
-       job->job_done(job);
+       bsg_reply->result = 0;
+       bsg_job_done(job, bsg_reply->result,
+                      bsg_reply->reply_payload_rcv_len);
        return 0;
 
 job_error:
        job->dd_data = NULL;
-       job->reply->result = rc;
+       bsg_reply->result = rc;
        return rc;
 }
 
@@ -1380,7 +1397,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
                      struct lpfc_iocbq *rspiocbq)
 {
        struct bsg_job_data *dd_data;
-       struct fc_bsg_job *job;
+       struct bsg_job *job;
+       struct fc_bsg_reply *bsg_reply;
        IOCB_t *rsp;
        struct lpfc_dmabuf *bmp, *cmp;
        struct lpfc_nodelist *ndlp;
@@ -1411,6 +1429,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
        /* Copy the completed job data or set the error status */
 
        if (job) {
+               bsg_reply = job->reply;
                if (rsp->ulpStatus) {
                        if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
                                switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
@@ -1428,7 +1447,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
                                rc = -EACCES;
                        }
                } else {
-                       job->reply->reply_payload_rcv_len = 0;
+                       bsg_reply->reply_payload_rcv_len = 0;
                }
        }
 
@@ -1442,8 +1461,9 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
        /* Complete the job if the job is still active */
 
        if (job) {
-               job->reply->result = rc;
-               job->job_done(job);
+               bsg_reply->result = rc;
+               bsg_job_done(job, bsg_reply->result,
+                              bsg_reply->reply_payload_rcv_len);
        }
        return;
 }
@@ -1457,7 +1477,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
  * @num_entry: Number of enties in the bde.
  **/
 static int
-lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
                  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
                  int num_entry)
 {
@@ -1603,12 +1623,14 @@ no_dd_data:
  * @job: SEND_MGMT_RESP fc_bsg_job
  **/
 static int
-lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
 {
-       struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+       struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
        struct lpfc_hba *phba = vport->phba;
+       struct fc_bsg_request *bsg_request = job->request;
+       struct fc_bsg_reply *bsg_reply = job->reply;
        struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
-               job->request->rqst_data.h_vendor.vendor_cmd;
+               bsg_request->rqst_data.h_vendor.vendor_cmd;
        struct ulp_bde64 *bpl;
        struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
        int bpl_entries;
@@ -1618,7 +1640,7 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
        int rc = 0;
 
        /* in case no data is transferred */
-       job->reply->reply_payload_rcv_len = 0;
+       bsg_reply->reply_payload_rcv_len = 0;
 
        if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
                rc = -ERANGE;
@@ -1664,7 +1686,7 @@ send_mgmt_rsp_free_bmp:
        kfree(bmp);
 send_mgmt_rsp_exit:
        /* make error code available to userspace */
-       job->reply->result = rc;
+       bsg_reply->result = rc;
        job->dd_data = NULL;
        return rc;
 }
@@ -1760,8 +1782,10 @@ lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1760 * All of this is done in-line. 1782 * All of this is done in-line.
1761 */ 1783 */
1762static int 1784static int
1763lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) 1785lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
1764{ 1786{
1787 struct fc_bsg_request *bsg_request = job->request;
1788 struct fc_bsg_reply *bsg_reply = job->reply;
1765 struct diag_mode_set *loopback_mode; 1789 struct diag_mode_set *loopback_mode;
1766 uint32_t link_flags; 1790 uint32_t link_flags;
1767 uint32_t timeout; 1791 uint32_t timeout;
@@ -1771,7 +1795,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1771 int rc = 0; 1795 int rc = 0;
1772 1796
1773 /* no data to return just the return code */ 1797 /* no data to return just the return code */
1774 job->reply->reply_payload_rcv_len = 0; 1798 bsg_reply->reply_payload_rcv_len = 0;
1775 1799
1776 if (job->request_len < sizeof(struct fc_bsg_request) + 1800 if (job->request_len < sizeof(struct fc_bsg_request) +
1777 sizeof(struct diag_mode_set)) { 1801 sizeof(struct diag_mode_set)) {
@@ -1791,7 +1815,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1791 1815
1792 /* bring the link to diagnostic mode */ 1816 /* bring the link to diagnostic mode */
1793 loopback_mode = (struct diag_mode_set *) 1817 loopback_mode = (struct diag_mode_set *)
1794 job->request->rqst_data.h_vendor.vendor_cmd; 1818 bsg_request->rqst_data.h_vendor.vendor_cmd;
1795 link_flags = loopback_mode->type; 1819 link_flags = loopback_mode->type;
1796 timeout = loopback_mode->timeout * 100; 1820 timeout = loopback_mode->timeout * 100;
1797 1821
@@ -1864,10 +1888,11 @@ loopback_mode_exit:
1864 1888
1865job_error: 1889job_error:
1866 /* make error code available to userspace */ 1890 /* make error code available to userspace */
1867 job->reply->result = rc; 1891 bsg_reply->result = rc;
1868 /* complete the job back to userspace if no error */ 1892 /* complete the job back to userspace if no error */
1869 if (rc == 0) 1893 if (rc == 0)
1870 job->job_done(job); 1894 bsg_job_done(job, bsg_reply->result,
1895 bsg_reply->reply_payload_rcv_len);
1871 return rc; 1896 return rc;
1872} 1897}
1873 1898
@@ -2015,14 +2040,16 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
2015 * loopback mode in order to perform a diagnostic loopback test. 2040 * loopback mode in order to perform a diagnostic loopback test.
2016 */ 2041 */
2017static int 2042static int
2018lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) 2043lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
2019{ 2044{
2045 struct fc_bsg_request *bsg_request = job->request;
2046 struct fc_bsg_reply *bsg_reply = job->reply;
2020 struct diag_mode_set *loopback_mode; 2047 struct diag_mode_set *loopback_mode;
2021 uint32_t link_flags, timeout; 2048 uint32_t link_flags, timeout;
2022 int i, rc = 0; 2049 int i, rc = 0;
2023 2050
2024 /* no data to return, just the return code */ 2051 /* no data to return, just the return code */
2025 job->reply->reply_payload_rcv_len = 0; 2052 bsg_reply->reply_payload_rcv_len = 0;
2026 2053
2027 if (job->request_len < sizeof(struct fc_bsg_request) + 2054 if (job->request_len < sizeof(struct fc_bsg_request) +
2028 sizeof(struct diag_mode_set)) { 2055 sizeof(struct diag_mode_set)) {
@@ -2054,7 +2081,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
2054 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2081 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2055 "3129 Bring link to diagnostic state.\n"); 2082 "3129 Bring link to diagnostic state.\n");
2056 loopback_mode = (struct diag_mode_set *) 2083 loopback_mode = (struct diag_mode_set *)
2057 job->request->rqst_data.h_vendor.vendor_cmd; 2084 bsg_request->rqst_data.h_vendor.vendor_cmd;
2058 link_flags = loopback_mode->type; 2085 link_flags = loopback_mode->type;
2059 timeout = loopback_mode->timeout * 100; 2086 timeout = loopback_mode->timeout * 100;
2060 2087
@@ -2151,10 +2178,11 @@ loopback_mode_exit:
2151 2178
2152job_error: 2179job_error:
2153 /* make error code available to userspace */ 2180 /* make error code available to userspace */
2154 job->reply->result = rc; 2181 bsg_reply->result = rc;
2155 /* complete the job back to userspace if no error */ 2182 /* complete the job back to userspace if no error */
2156 if (rc == 0) 2183 if (rc == 0)
2157 job->job_done(job); 2184 bsg_job_done(job, bsg_reply->result,
2185 bsg_reply->reply_payload_rcv_len);
2158 return rc; 2186 return rc;
2159} 2187}
2160 2188
@@ -2166,17 +2194,17 @@ job_error:
2166 * command from the user to proper driver action routines. 2194 * command from the user to proper driver action routines.
2167 */ 2195 */
2168static int 2196static int
2169lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job) 2197lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2170{ 2198{
2171 struct Scsi_Host *shost; 2199 struct Scsi_Host *shost;
2172 struct lpfc_vport *vport; 2200 struct lpfc_vport *vport;
2173 struct lpfc_hba *phba; 2201 struct lpfc_hba *phba;
2174 int rc; 2202 int rc;
2175 2203
2176 shost = job->shost; 2204 shost = fc_bsg_to_shost(job);
2177 if (!shost) 2205 if (!shost)
2178 return -ENODEV; 2206 return -ENODEV;
2179 vport = (struct lpfc_vport *)job->shost->hostdata; 2207 vport = shost_priv(shost);
2180 if (!vport) 2208 if (!vport)
2181 return -ENODEV; 2209 return -ENODEV;
2182 phba = vport->phba; 2210 phba = vport->phba;
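Host resolution changes in the same mechanical way: the old code reached through job->shost and open-coded the hostdata cast, while the new code goes through the transport helper and shost_priv(). Roughly what those helpers look like in this era's SCSI headers (paraphrased, shown only for context):

    static inline void *shost_priv(struct Scsi_Host *shost)
    {
            return (void *)shost->hostdata;    /* driver-private area */
    }

    /* A bsg_job no longer carries a Scsi_Host pointer directly; the
     * helper recovers it from the job's device, whether the job was
     * queued against the host itself or one of its remote ports. */
    static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job)
    {
            if (scsi_is_host_device(job->dev))
                    return dev_to_shost(job->dev);
            return rport_to_shost(dev_to_rport(job->dev));
    }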
@@ -2202,8 +2230,10 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
2202 * command from the user to proper driver action routines. 2230 * command from the user to proper driver action routines.
2203 */ 2231 */
2204static int 2232static int
2205lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job) 2233lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
2206{ 2234{
2235 struct fc_bsg_request *bsg_request = job->request;
2236 struct fc_bsg_reply *bsg_reply = job->reply;
2207 struct Scsi_Host *shost; 2237 struct Scsi_Host *shost;
2208 struct lpfc_vport *vport; 2238 struct lpfc_vport *vport;
2209 struct lpfc_hba *phba; 2239 struct lpfc_hba *phba;
@@ -2211,10 +2241,10 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
2211 uint32_t timeout; 2241 uint32_t timeout;
2212 int rc, i; 2242 int rc, i;
2213 2243
2214 shost = job->shost; 2244 shost = fc_bsg_to_shost(job);
2215 if (!shost) 2245 if (!shost)
2216 return -ENODEV; 2246 return -ENODEV;
2217 vport = (struct lpfc_vport *)job->shost->hostdata; 2247 vport = shost_priv(shost);
2218 if (!vport) 2248 if (!vport)
2219 return -ENODEV; 2249 return -ENODEV;
2220 phba = vport->phba; 2250 phba = vport->phba;
@@ -2232,7 +2262,7 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
2232 phba->link_flag &= ~LS_LOOPBACK_MODE; 2262 phba->link_flag &= ~LS_LOOPBACK_MODE;
2233 spin_unlock_irq(&phba->hbalock); 2263 spin_unlock_irq(&phba->hbalock);
2234 loopback_mode_end_cmd = (struct diag_mode_set *) 2264 loopback_mode_end_cmd = (struct diag_mode_set *)
2235 job->request->rqst_data.h_vendor.vendor_cmd; 2265 bsg_request->rqst_data.h_vendor.vendor_cmd;
2236 timeout = loopback_mode_end_cmd->timeout * 100; 2266 timeout = loopback_mode_end_cmd->timeout * 100;
2237 2267
2238 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); 2268 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
@@ -2263,10 +2293,11 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
2263 2293
2264loopback_mode_end_exit: 2294loopback_mode_end_exit:
2265 /* make return code available to userspace */ 2295 /* make return code available to userspace */
2266 job->reply->result = rc; 2296 bsg_reply->result = rc;
2267 /* complete the job back to userspace if no error */ 2297 /* complete the job back to userspace if no error */
2268 if (rc == 0) 2298 if (rc == 0)
2269 job->job_done(job); 2299 bsg_job_done(job, bsg_reply->result,
2300 bsg_reply->reply_payload_rcv_len);
2270 return rc; 2301 return rc;
2271} 2302}
2272 2303
@@ -2278,8 +2309,10 @@ loopback_mode_end_exit:
2278 * application. 2309 * application.
2279 */ 2310 */
2280static int 2311static int
2281lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job) 2312lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2282{ 2313{
2314 struct fc_bsg_request *bsg_request = job->request;
2315 struct fc_bsg_reply *bsg_reply = job->reply;
2283 struct Scsi_Host *shost; 2316 struct Scsi_Host *shost;
2284 struct lpfc_vport *vport; 2317 struct lpfc_vport *vport;
2285 struct lpfc_hba *phba; 2318 struct lpfc_hba *phba;
@@ -2292,12 +2325,12 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
2292 struct diag_status *diag_status_reply; 2325 struct diag_status *diag_status_reply;
2293 int mbxstatus, rc = 0; 2326 int mbxstatus, rc = 0;
2294 2327
2295 shost = job->shost; 2328 shost = fc_bsg_to_shost(job);
2296 if (!shost) { 2329 if (!shost) {
2297 rc = -ENODEV; 2330 rc = -ENODEV;
2298 goto job_error; 2331 goto job_error;
2299 } 2332 }
2300 vport = (struct lpfc_vport *)job->shost->hostdata; 2333 vport = shost_priv(shost);
2301 if (!vport) { 2334 if (!vport) {
2302 rc = -ENODEV; 2335 rc = -ENODEV;
2303 goto job_error; 2336 goto job_error;
@@ -2335,7 +2368,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
2335 goto job_error; 2368 goto job_error;
2336 2369
2337 link_diag_test_cmd = (struct sli4_link_diag *) 2370 link_diag_test_cmd = (struct sli4_link_diag *)
2338 job->request->rqst_data.h_vendor.vendor_cmd; 2371 bsg_request->rqst_data.h_vendor.vendor_cmd;
2339 2372
2340 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 2373 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2341 2374
@@ -2385,7 +2418,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
2385 } 2418 }
2386 2419
2387 diag_status_reply = (struct diag_status *) 2420 diag_status_reply = (struct diag_status *)
2388 job->reply->reply_data.vendor_reply.vendor_rsp; 2421 bsg_reply->reply_data.vendor_reply.vendor_rsp;
2389 2422
2390 if (job->reply_len < 2423 if (job->reply_len <
2391 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) { 2424 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
@@ -2413,10 +2446,11 @@ link_diag_test_exit:
2413 2446
2414job_error: 2447job_error:
2415 /* make error code available to userspace */ 2448 /* make error code available to userspace */
2416 job->reply->result = rc; 2449 bsg_reply->result = rc;
2417 /* complete the job back to userspace if no error */ 2450 /* complete the job back to userspace if no error */
2418 if (rc == 0) 2451 if (rc == 0)
2419 job->job_done(job); 2452 bsg_job_done(job, bsg_reply->result,
2453 bsg_reply->reply_payload_rcv_len);
2420 return rc; 2454 return rc;
2421} 2455}
2422 2456
@@ -2982,9 +3016,10 @@ err_post_rxbufs_exit:
2982 * of loopback mode. 3016 * of loopback mode.
2983 **/ 3017 **/
2984static int 3018static int
2985lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) 3019lpfc_bsg_diag_loopback_run(struct bsg_job *job)
2986{ 3020{
2987 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3021 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3022 struct fc_bsg_reply *bsg_reply = job->reply;
2988 struct lpfc_hba *phba = vport->phba; 3023 struct lpfc_hba *phba = vport->phba;
2989 struct lpfc_bsg_event *evt; 3024 struct lpfc_bsg_event *evt;
2990 struct event_data *evdat; 3025 struct event_data *evdat;
@@ -3012,7 +3047,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
3012 uint32_t total_mem; 3047 uint32_t total_mem;
3013 3048
3014 /* in case no data is returned, return just the return code */ 3049 /* in case no data is returned, return just the return code */
3015 job->reply->reply_payload_rcv_len = 0; 3050 bsg_reply->reply_payload_rcv_len = 0;
3016 3051
3017 if (job->request_len < 3052 if (job->request_len <
3018 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) { 3053 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
@@ -3237,11 +3272,11 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
3237 rc = IOCB_SUCCESS; 3272 rc = IOCB_SUCCESS;
3238 /* skip over elx loopback header */ 3273 /* skip over elx loopback header */
3239 rx_databuf += ELX_LOOPBACK_HEADER_SZ; 3274 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3240 job->reply->reply_payload_rcv_len = 3275 bsg_reply->reply_payload_rcv_len =
3241 sg_copy_from_buffer(job->reply_payload.sg_list, 3276 sg_copy_from_buffer(job->reply_payload.sg_list,
3242 job->reply_payload.sg_cnt, 3277 job->reply_payload.sg_cnt,
3243 rx_databuf, size); 3278 rx_databuf, size);
3244 job->reply->reply_payload_rcv_len = size; 3279 bsg_reply->reply_payload_rcv_len = size;
3245 } 3280 }
3246 } 3281 }
3247 3282
@@ -3271,11 +3306,12 @@ err_loopback_test_exit:
3271loopback_test_exit: 3306loopback_test_exit:
3272 kfree(dataout); 3307 kfree(dataout);
3273 /* make error code available to userspace */ 3308 /* make error code available to userspace */
3274 job->reply->result = rc; 3309 bsg_reply->result = rc;
3275 job->dd_data = NULL; 3310 job->dd_data = NULL;
3276 /* complete the job back to userspace if no error */ 3311 /* complete the job back to userspace if no error */
3277 if (rc == IOCB_SUCCESS) 3312 if (rc == IOCB_SUCCESS)
3278 job->job_done(job); 3313 bsg_job_done(job, bsg_reply->result,
3314 bsg_reply->reply_payload_rcv_len);
3279 return rc; 3315 return rc;
3280} 3316}
3281 3317
@@ -3284,9 +3320,10 @@ loopback_test_exit:
3284 * @job: GET_DFC_REV fc_bsg_job 3320 * @job: GET_DFC_REV fc_bsg_job
3285 **/ 3321 **/
3286static int 3322static int
3287lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job) 3323lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3288{ 3324{
3289 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3325 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3326 struct fc_bsg_reply *bsg_reply = job->reply;
3290 struct lpfc_hba *phba = vport->phba; 3327 struct lpfc_hba *phba = vport->phba;
3291 struct get_mgmt_rev_reply *event_reply; 3328 struct get_mgmt_rev_reply *event_reply;
3292 int rc = 0; 3329 int rc = 0;
@@ -3301,7 +3338,7 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
3301 } 3338 }
3302 3339
3303 event_reply = (struct get_mgmt_rev_reply *) 3340 event_reply = (struct get_mgmt_rev_reply *)
3304 job->reply->reply_data.vendor_reply.vendor_rsp; 3341 bsg_reply->reply_data.vendor_reply.vendor_rsp;
3305 3342
3306 if (job->reply_len < 3343 if (job->reply_len <
3307 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { 3344 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
@@ -3315,9 +3352,10 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
3315 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; 3352 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3316 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; 3353 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3317job_error: 3354job_error:
3318 job->reply->result = rc; 3355 bsg_reply->result = rc;
3319 if (rc == 0) 3356 if (rc == 0)
3320 job->job_done(job); 3357 bsg_job_done(job, bsg_reply->result,
3358 bsg_reply->reply_payload_rcv_len);
3321 return rc; 3359 return rc;
3322} 3360}
3323 3361
@@ -3336,7 +3374,8 @@ static void
3336lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3374lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3337{ 3375{
3338 struct bsg_job_data *dd_data; 3376 struct bsg_job_data *dd_data;
3339 struct fc_bsg_job *job; 3377 struct fc_bsg_reply *bsg_reply;
3378 struct bsg_job *job;
3340 uint32_t size; 3379 uint32_t size;
3341 unsigned long flags; 3380 unsigned long flags;
3342 uint8_t *pmb, *pmb_buf; 3381 uint8_t *pmb, *pmb_buf;
@@ -3364,8 +3403,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3364 /* Copy the mailbox data to the job if it is still active */ 3403 /* Copy the mailbox data to the job if it is still active */
3365 3404
3366 if (job) { 3405 if (job) {
3406 bsg_reply = job->reply;
3367 size = job->reply_payload.payload_len; 3407 size = job->reply_payload.payload_len;
3368 job->reply->reply_payload_rcv_len = 3408 bsg_reply->reply_payload_rcv_len =
3369 sg_copy_from_buffer(job->reply_payload.sg_list, 3409 sg_copy_from_buffer(job->reply_payload.sg_list,
3370 job->reply_payload.sg_cnt, 3410 job->reply_payload.sg_cnt,
3371 pmb_buf, size); 3411 pmb_buf, size);
@@ -3379,8 +3419,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3379 /* Complete the job if the job is still active */ 3419 /* Complete the job if the job is still active */
3380 3420
3381 if (job) { 3421 if (job) {
3382 job->reply->result = 0; 3422 bsg_reply->result = 0;
3383 job->job_done(job); 3423 bsg_job_done(job, bsg_reply->result,
3424 bsg_reply->reply_payload_rcv_len);
3384 } 3425 }
3385 return; 3426 return;
3386} 3427}
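One subtlety in the asynchronous completions: job->reply may only be dereferenced while the job is known to be alive, so the handlers snapshot it under ct_ev_lock in the same critical section that fences off the timeout path. Condensed from the hunk above (fragment, not complete code):

    spin_lock_irqsave(&phba->ct_ev_lock, flags);
    job = dd_data->set_job;
    if (job) {
            bsg_reply = job->reply;   /* snapshot while the job is live */
            job->dd_data = NULL;      /* stop the timeout handler racing us */
    }
    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

    /* ... copy mailbox data back to the reply payload ... */

    if (job) {
            bsg_reply->result = 0;
            bsg_job_done(job, bsg_reply->result,
                         bsg_reply->reply_payload_rcv_len);
    }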
@@ -3510,11 +3551,12 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3510 * This routine handles BSG job completion for mailbox commands with 3551 * This routine handles BSG job completion for mailbox commands with
3511 * multiple external buffers. 3552 * multiple external buffers.
3512 **/ 3553 **/
3513static struct fc_bsg_job * 3554static struct bsg_job *
3514lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3555lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3515{ 3556{
3516 struct bsg_job_data *dd_data; 3557 struct bsg_job_data *dd_data;
3517 struct fc_bsg_job *job; 3558 struct bsg_job *job;
3559 struct fc_bsg_reply *bsg_reply;
3518 uint8_t *pmb, *pmb_buf; 3560 uint8_t *pmb, *pmb_buf;
3519 unsigned long flags; 3561 unsigned long flags;
3520 uint32_t size; 3562 uint32_t size;
@@ -3529,6 +3571,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3529 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3571 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3530 job = dd_data->set_job; 3572 job = dd_data->set_job;
3531 if (job) { 3573 if (job) {
3574 bsg_reply = job->reply;
3532 /* Prevent timeout handling from trying to abort job */ 3575 /* Prevent timeout handling from trying to abort job */
3533 job->dd_data = NULL; 3576 job->dd_data = NULL;
3534 } 3577 }
@@ -3559,13 +3602,13 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3559 3602
3560 if (job) { 3603 if (job) {
3561 size = job->reply_payload.payload_len; 3604 size = job->reply_payload.payload_len;
3562 job->reply->reply_payload_rcv_len = 3605 bsg_reply->reply_payload_rcv_len =
3563 sg_copy_from_buffer(job->reply_payload.sg_list, 3606 sg_copy_from_buffer(job->reply_payload.sg_list,
3564 job->reply_payload.sg_cnt, 3607 job->reply_payload.sg_cnt,
3565 pmb_buf, size); 3608 pmb_buf, size);
3566 3609
3567 /* result for successful */ 3610 /* result for successful */
3568 job->reply->result = 0; 3611 bsg_reply->result = 0;
3569 3612
3570 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3613 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3571 "2937 SLI_CONFIG ext-buffer maibox command " 3614 "2937 SLI_CONFIG ext-buffer maibox command "
@@ -3603,7 +3646,8 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3603static void 3646static void
3604lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3647lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3605{ 3648{
3606 struct fc_bsg_job *job; 3649 struct bsg_job *job;
3650 struct fc_bsg_reply *bsg_reply;
3607 3651
3608 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3652 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3609 3653
@@ -3623,9 +3667,11 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3623 mempool_free(pmboxq, phba->mbox_mem_pool); 3667 mempool_free(pmboxq, phba->mbox_mem_pool);
3624 3668
3625 /* if the job is still active, call job done */ 3669 /* if the job is still active, call job done */
3626 if (job) 3670 if (job) {
3627 job->job_done(job); 3671 bsg_reply = job->reply;
3628 3672 bsg_job_done(job, bsg_reply->result,
3673 bsg_reply->reply_payload_rcv_len);
3674 }
3629 return; 3675 return;
3630} 3676}
3631 3677
@@ -3640,7 +3686,8 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3640static void 3686static void
3641lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3687lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3642{ 3688{
3643 struct fc_bsg_job *job; 3689 struct bsg_job *job;
3690 struct fc_bsg_reply *bsg_reply;
3644 3691
3645 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3692 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3646 3693
@@ -3658,8 +3705,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3658 lpfc_bsg_mbox_ext_session_reset(phba); 3705 lpfc_bsg_mbox_ext_session_reset(phba);
3659 3706
3660 /* if the job is still active, call job done */ 3707 /* if the job is still active, call job done */
3661 if (job) 3708 if (job) {
3662 job->job_done(job); 3709 bsg_reply = job->reply;
3710 bsg_job_done(job, bsg_reply->result,
3711 bsg_reply->reply_payload_rcv_len);
3712 }
3663 3713
3664 return; 3714 return;
3665} 3715}
@@ -3768,10 +3818,11 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3768 * non-embedded external buffers. 3818 * non-embedded external buffers.
3769 **/ 3819 **/
3770static int 3820static int
3771lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, 3821lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3772 enum nemb_type nemb_tp, 3822 enum nemb_type nemb_tp,
3773 struct lpfc_dmabuf *dmabuf) 3823 struct lpfc_dmabuf *dmabuf)
3774{ 3824{
3825 struct fc_bsg_request *bsg_request = job->request;
3775 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3826 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3776 struct dfc_mbox_req *mbox_req; 3827 struct dfc_mbox_req *mbox_req;
3777 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf; 3828 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
@@ -3784,7 +3835,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3784 int rc, i; 3835 int rc, i;
3785 3836
3786 mbox_req = 3837 mbox_req =
3787 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 3838 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3788 3839
3789 /* pointer to the start of mailbox command */ 3840 /* pointer to the start of mailbox command */
3790 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3841 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3955,10 +4006,12 @@ job_error:
3955 * non-embedded external buffers. 4006 * non-embedded external buffers.
3956 **/ 4007 **/
3957static int 4008static int
3958lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, 4009lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3959 enum nemb_type nemb_tp, 4010 enum nemb_type nemb_tp,
3960 struct lpfc_dmabuf *dmabuf) 4011 struct lpfc_dmabuf *dmabuf)
3961{ 4012{
4013 struct fc_bsg_request *bsg_request = job->request;
4014 struct fc_bsg_reply *bsg_reply = job->reply;
3962 struct dfc_mbox_req *mbox_req; 4015 struct dfc_mbox_req *mbox_req;
3963 struct lpfc_sli_config_mbox *sli_cfg_mbx; 4016 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3964 uint32_t ext_buf_cnt; 4017 uint32_t ext_buf_cnt;
@@ -3969,7 +4022,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3969 int rc = SLI_CONFIG_NOT_HANDLED, i; 4022 int rc = SLI_CONFIG_NOT_HANDLED, i;
3970 4023
3971 mbox_req = 4024 mbox_req =
3972 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 4025 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3973 4026
3974 /* pointer to the start of mailbox command */ 4027 /* pointer to the start of mailbox command */
3975 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 4028 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -4096,8 +4149,9 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4096 4149
4097 /* wait for additional external buffers */ 4150 /* wait for additional external buffers */
4098 4151
4099 job->reply->result = 0; 4152 bsg_reply->result = 0;
4100 job->job_done(job); 4153 bsg_job_done(job, bsg_reply->result,
4154 bsg_reply->reply_payload_rcv_len);
4101 return SLI_CONFIG_HANDLED; 4155 return SLI_CONFIG_HANDLED;
4102 4156
4103job_error: 4157job_error:
@@ -4119,7 +4173,7 @@ job_error:
4119 * with embedded subsystem 0x1 and opcodes with external HBDs. 4173 * with embedded subsystem 0x1 and opcodes with external HBDs.
4120 **/ 4174 **/
4121static int 4175static int
4122lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, 4176lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4123 struct lpfc_dmabuf *dmabuf) 4177 struct lpfc_dmabuf *dmabuf)
4124{ 4178{
4125 struct lpfc_sli_config_mbox *sli_cfg_mbx; 4179 struct lpfc_sli_config_mbox *sli_cfg_mbx;
@@ -4268,8 +4322,9 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4268 * user space through BSG. 4322 * user space through BSG.
4269 **/ 4323 **/
4270static int 4324static int
4271lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job) 4325lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
4272{ 4326{
4327 struct fc_bsg_reply *bsg_reply = job->reply;
4273 struct lpfc_sli_config_mbox *sli_cfg_mbx; 4328 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4274 struct lpfc_dmabuf *dmabuf; 4329 struct lpfc_dmabuf *dmabuf;
4275 uint8_t *pbuf; 4330 uint8_t *pbuf;
@@ -4307,7 +4362,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
4307 dmabuf, index); 4362 dmabuf, index);
4308 4363
4309 pbuf = (uint8_t *)dmabuf->virt; 4364 pbuf = (uint8_t *)dmabuf->virt;
4310 job->reply->reply_payload_rcv_len = 4365 bsg_reply->reply_payload_rcv_len =
4311 sg_copy_from_buffer(job->reply_payload.sg_list, 4366 sg_copy_from_buffer(job->reply_payload.sg_list,
4312 job->reply_payload.sg_cnt, 4367 job->reply_payload.sg_cnt,
4313 pbuf, size); 4368 pbuf, size);
@@ -4321,8 +4376,9 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
4321 lpfc_bsg_mbox_ext_session_reset(phba); 4376 lpfc_bsg_mbox_ext_session_reset(phba);
4322 } 4377 }
4323 4378
4324 job->reply->result = 0; 4379 bsg_reply->result = 0;
4325 job->job_done(job); 4380 bsg_job_done(job, bsg_reply->result,
4381 bsg_reply->reply_payload_rcv_len);
4326 4382
4327 return SLI_CONFIG_HANDLED; 4383 return SLI_CONFIG_HANDLED;
4328} 4384}
@@ -4336,9 +4392,10 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
4336 * from user space through BSG. 4392 * from user space through BSG.
4337 **/ 4393 **/
4338static int 4394static int
4339lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job, 4395lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4340 struct lpfc_dmabuf *dmabuf) 4396 struct lpfc_dmabuf *dmabuf)
4341{ 4397{
4398 struct fc_bsg_reply *bsg_reply = job->reply;
4342 struct bsg_job_data *dd_data = NULL; 4399 struct bsg_job_data *dd_data = NULL;
4343 LPFC_MBOXQ_t *pmboxq = NULL; 4400 LPFC_MBOXQ_t *pmboxq = NULL;
4344 MAILBOX_t *pmb; 4401 MAILBOX_t *pmb;
@@ -4436,8 +4493,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
4436 } 4493 }
4437 4494
4438 /* wait for additional external buffers */ 4495 /* wait for additional external buffers */
4439 job->reply->result = 0; 4496 bsg_reply->result = 0;
4440 job->job_done(job); 4497 bsg_job_done(job, bsg_reply->result,
4498 bsg_reply->reply_payload_rcv_len);
4441 return SLI_CONFIG_HANDLED; 4499 return SLI_CONFIG_HANDLED;
4442 4500
4443job_error: 4501job_error:
@@ -4457,7 +4515,7 @@ job_error:
4457 * command with multiple non-embedded external buffers. 4515 * command with multiple non-embedded external buffers.
4458 **/ 4516 **/
4459static int 4517static int
4460lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job, 4518lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4461 struct lpfc_dmabuf *dmabuf) 4519 struct lpfc_dmabuf *dmabuf)
4462{ 4520{
4463 int rc; 4521 int rc;
@@ -4502,14 +4560,15 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
4502 * (0x9B) mailbox commands and external buffers. 4560 * (0x9B) mailbox commands and external buffers.
4503 **/ 4561 **/
4504static int 4562static int
4505lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, 4563lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4506 struct lpfc_dmabuf *dmabuf) 4564 struct lpfc_dmabuf *dmabuf)
4507{ 4565{
4566 struct fc_bsg_request *bsg_request = job->request;
4508 struct dfc_mbox_req *mbox_req; 4567 struct dfc_mbox_req *mbox_req;
4509 int rc = SLI_CONFIG_NOT_HANDLED; 4568 int rc = SLI_CONFIG_NOT_HANDLED;
4510 4569
4511 mbox_req = 4570 mbox_req =
4512 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 4571 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4513 4572
4514 /* mbox command with/without single external buffer */ 4573 /* mbox command with/without single external buffer */
4515 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0) 4574 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
@@ -4579,9 +4638,11 @@ sli_cfg_ext_error:
4579 * let our completion handler finish the command. 4638 * let our completion handler finish the command.
4580 **/ 4639 **/
4581static int 4640static int
4582lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, 4641lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4583 struct lpfc_vport *vport) 4642 struct lpfc_vport *vport)
4584{ 4643{
4644 struct fc_bsg_request *bsg_request = job->request;
4645 struct fc_bsg_reply *bsg_reply = job->reply;
4585 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ 4646 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4586 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ 4647 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4587 /* a 4k buffer to hold the mb and extended data from/to the bsg */ 4648 /* a 4k buffer to hold the mb and extended data from/to the bsg */
@@ -4600,7 +4661,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4600 uint32_t size; 4661 uint32_t size;
4601 4662
4602 /* in case no data is transferred */ 4663 /* in case no data is transferred */
4603 job->reply->reply_payload_rcv_len = 0; 4664 bsg_reply->reply_payload_rcv_len = 0;
4604 4665
4605 /* sanity check to protect driver */ 4666 /* sanity check to protect driver */
4606 if (job->reply_payload.payload_len > BSG_MBOX_SIZE || 4667 if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
@@ -4619,7 +4680,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4619 } 4680 }
4620 4681
4621 mbox_req = 4682 mbox_req =
4622 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 4683 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4623 4684
4624 /* check if requested extended data lengths are valid */ 4685 /* check if requested extended data lengths are valid */
4625 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || 4686 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
@@ -4841,7 +4902,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4841 4902
4842 /* job finished, copy the data */ 4903 /* job finished, copy the data */
4843 memcpy(pmbx, pmb, sizeof(*pmb)); 4904 memcpy(pmbx, pmb, sizeof(*pmb));
4844 job->reply->reply_payload_rcv_len = 4905 bsg_reply->reply_payload_rcv_len =
4845 sg_copy_from_buffer(job->reply_payload.sg_list, 4906 sg_copy_from_buffer(job->reply_payload.sg_list,
4846 job->reply_payload.sg_cnt, 4907 job->reply_payload.sg_cnt,
4847 pmbx, size); 4908 pmbx, size);
@@ -4870,15 +4931,17 @@ job_cont:
4870 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. 4931 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4871 **/ 4932 **/
4872static int 4933static int
4873lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) 4934lpfc_bsg_mbox_cmd(struct bsg_job *job)
4874{ 4935{
4875 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 4936 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
4937 struct fc_bsg_request *bsg_request = job->request;
4938 struct fc_bsg_reply *bsg_reply = job->reply;
4876 struct lpfc_hba *phba = vport->phba; 4939 struct lpfc_hba *phba = vport->phba;
4877 struct dfc_mbox_req *mbox_req; 4940 struct dfc_mbox_req *mbox_req;
4878 int rc = 0; 4941 int rc = 0;
4879 4942
4880 /* mix-and-match backward compatibility */ 4943 /* mix-and-match backward compatibility */
4881 job->reply->reply_payload_rcv_len = 0; 4944 bsg_reply->reply_payload_rcv_len = 0;
4882 if (job->request_len < 4945 if (job->request_len <
4883 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { 4946 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4884 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4947 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -4889,7 +4952,7 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
4889 sizeof(struct fc_bsg_request)), 4952 sizeof(struct fc_bsg_request)),
4890 (int)sizeof(struct dfc_mbox_req)); 4953 (int)sizeof(struct dfc_mbox_req));
4891 mbox_req = (struct dfc_mbox_req *) 4954 mbox_req = (struct dfc_mbox_req *)
4892 job->request->rqst_data.h_vendor.vendor_cmd; 4955 bsg_request->rqst_data.h_vendor.vendor_cmd;
4893 mbox_req->extMboxTag = 0; 4956 mbox_req->extMboxTag = 0;
4894 mbox_req->extSeqNum = 0; 4957 mbox_req->extSeqNum = 0;
4895 } 4958 }
@@ -4898,15 +4961,16 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
4898 4961
4899 if (rc == 0) { 4962 if (rc == 0) {
4900 /* job done */ 4963 /* job done */
4901 job->reply->result = 0; 4964 bsg_reply->result = 0;
4902 job->dd_data = NULL; 4965 job->dd_data = NULL;
4903 job->job_done(job); 4966 bsg_job_done(job, bsg_reply->result,
4967 bsg_reply->reply_payload_rcv_len);
4904 } else if (rc == 1) 4968 } else if (rc == 1)
4905 /* job submitted, will complete later */ 4969 /* job submitted, will complete later */
4906 rc = 0; /* return zero, no error */ 4970 rc = 0; /* return zero, no error */
4907 else { 4971 else {
4908 /* some error occurred */ 4972 /* some error occurred */
4909 job->reply->result = rc; 4973 bsg_reply->result = rc;
4910 job->dd_data = NULL; 4974 job->dd_data = NULL;
4911 } 4975 }
4912 4976
@@ -4936,7 +5000,8 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4936 struct lpfc_iocbq *rspiocbq) 5000 struct lpfc_iocbq *rspiocbq)
4937{ 5001{
4938 struct bsg_job_data *dd_data; 5002 struct bsg_job_data *dd_data;
4939 struct fc_bsg_job *job; 5003 struct bsg_job *job;
5004 struct fc_bsg_reply *bsg_reply;
4940 IOCB_t *rsp; 5005 IOCB_t *rsp;
4941 struct lpfc_dmabuf *bmp, *cmp, *rmp; 5006 struct lpfc_dmabuf *bmp, *cmp, *rmp;
4942 struct lpfc_bsg_menlo *menlo; 5007 struct lpfc_bsg_menlo *menlo;
@@ -4956,6 +5021,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4956 spin_lock_irqsave(&phba->ct_ev_lock, flags); 5021 spin_lock_irqsave(&phba->ct_ev_lock, flags);
4957 job = dd_data->set_job; 5022 job = dd_data->set_job;
4958 if (job) { 5023 if (job) {
5024 bsg_reply = job->reply;
4959 /* Prevent timeout handling from trying to abort job */ 5025 /* Prevent timeout handling from trying to abort job */
4960 job->dd_data = NULL; 5026 job->dd_data = NULL;
4961 } 5027 }
@@ -4970,7 +5036,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4970 */ 5036 */
4971 5037
4972 menlo_resp = (struct menlo_response *) 5038 menlo_resp = (struct menlo_response *)
4973 job->reply->reply_data.vendor_reply.vendor_rsp; 5039 bsg_reply->reply_data.vendor_reply.vendor_rsp;
4974 menlo_resp->xri = rsp->ulpContext; 5040 menlo_resp->xri = rsp->ulpContext;
4975 if (rsp->ulpStatus) { 5041 if (rsp->ulpStatus) {
4976 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 5042 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
@@ -4990,7 +5056,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4990 } 5056 }
4991 } else { 5057 } else {
4992 rsp_size = rsp->un.genreq64.bdl.bdeSize; 5058 rsp_size = rsp->un.genreq64.bdl.bdeSize;
4993 job->reply->reply_payload_rcv_len = 5059 bsg_reply->reply_payload_rcv_len =
4994 lpfc_bsg_copy_data(rmp, &job->reply_payload, 5060 lpfc_bsg_copy_data(rmp, &job->reply_payload,
4995 rsp_size, 0); 5061 rsp_size, 0);
4996 } 5062 }
@@ -5007,8 +5073,9 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
5007 /* Complete the job if active */ 5073 /* Complete the job if active */
5008 5074
5009 if (job) { 5075 if (job) {
5010 job->reply->result = rc; 5076 bsg_reply->result = rc;
5011 job->job_done(job); 5077 bsg_job_done(job, bsg_reply->result,
5078 bsg_reply->reply_payload_rcv_len);
5012 } 5079 }
5013 5080
5014 return; 5081 return;
@@ -5024,9 +5091,11 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
5024 * supplied in the menlo request header xri field. 5091 * supplied in the menlo request header xri field.
5025 **/ 5092 **/
5026static int 5093static int
5027lpfc_menlo_cmd(struct fc_bsg_job *job) 5094lpfc_menlo_cmd(struct bsg_job *job)
5028{ 5095{
5029 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 5096 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5097 struct fc_bsg_request *bsg_request = job->request;
5098 struct fc_bsg_reply *bsg_reply = job->reply;
5030 struct lpfc_hba *phba = vport->phba; 5099 struct lpfc_hba *phba = vport->phba;
5031 struct lpfc_iocbq *cmdiocbq; 5100 struct lpfc_iocbq *cmdiocbq;
5032 IOCB_t *cmd; 5101 IOCB_t *cmd;
@@ -5039,7 +5108,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
5039 struct ulp_bde64 *bpl = NULL; 5108 struct ulp_bde64 *bpl = NULL;
5040 5109
5041 /* in case no data is returned, return just the return code */ 5110 /* in case no data is returned, return just the return code */
5042 job->reply->reply_payload_rcv_len = 0; 5111 bsg_reply->reply_payload_rcv_len = 0;
5043 5112
5044 if (job->request_len < 5113 if (job->request_len <
5045 sizeof(struct fc_bsg_request) + 5114 sizeof(struct fc_bsg_request) +
@@ -5069,7 +5138,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
5069 } 5138 }
5070 5139
5071 menlo_cmd = (struct menlo_command *) 5140 menlo_cmd = (struct menlo_command *)
5072 job->request->rqst_data.h_vendor.vendor_cmd; 5141 bsg_request->rqst_data.h_vendor.vendor_cmd;
5073 5142
5074 /* allocate our bsg tracking structure */ 5143 /* allocate our bsg tracking structure */
5075 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 5144 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -5180,19 +5249,65 @@ free_dd:
5180 kfree(dd_data); 5249 kfree(dd_data);
5181no_dd_data: 5250no_dd_data:
5182 /* make error code available to userspace */ 5251 /* make error code available to userspace */
5183 job->reply->result = rc; 5252 bsg_reply->result = rc;
5184 job->dd_data = NULL; 5253 job->dd_data = NULL;
5185 return rc; 5254 return rc;
5186} 5255}
5187 5256
5257static int
5258lpfc_forced_link_speed(struct bsg_job *job)
5259{
5260 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5261 struct lpfc_vport *vport = shost_priv(shost);
5262 struct lpfc_hba *phba = vport->phba;
5263 struct fc_bsg_reply *bsg_reply = job->reply;
5264 struct forced_link_speed_support_reply *forced_reply;
5265 int rc = 0;
5266
5267 if (job->request_len <
5268 sizeof(struct fc_bsg_request) +
5269 sizeof(struct get_forced_link_speed_support)) {
5270 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5271 "0048 Received FORCED_LINK_SPEED request "
5272 "below minimum size\n");
5273 rc = -EINVAL;
5274 goto job_error;
5275 }
5276
5277 forced_reply = (struct forced_link_speed_support_reply *)
5278 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5279
5280 if (job->reply_len <
5281 sizeof(struct fc_bsg_request) +
5282 sizeof(struct forced_link_speed_support_reply)) {
5283 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5284 "0049 Received FORCED_LINK_SPEED reply below "
5285 "minimum size\n");
5286 rc = -EINVAL;
5287 goto job_error;
5288 }
5289
5290 forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5291 ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5292 : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5293job_error:
5294 bsg_reply->result = rc;
5295 if (rc == 0)
5296 bsg_job_done(job, bsg_reply->result,
5297 bsg_reply->reply_payload_rcv_len);
5298 return rc;
5299}
5300
5188/** 5301/**
5189 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 5302 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5190 * @job: fc_bsg_job to handle 5303 * @job: fc_bsg_job to handle
5191 **/ 5304 **/
5192static int 5305static int
5193lpfc_bsg_hst_vendor(struct fc_bsg_job *job) 5306lpfc_bsg_hst_vendor(struct bsg_job *job)
5194{ 5307{
5195 int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; 5308 struct fc_bsg_request *bsg_request = job->request;
5309 struct fc_bsg_reply *bsg_reply = job->reply;
5310 int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
5196 int rc; 5311 int rc;
5197 5312
5198 switch (command) { 5313 switch (command) {
@@ -5227,11 +5342,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
5227 case LPFC_BSG_VENDOR_MENLO_DATA: 5342 case LPFC_BSG_VENDOR_MENLO_DATA:
5228 rc = lpfc_menlo_cmd(job); 5343 rc = lpfc_menlo_cmd(job);
5229 break; 5344 break;
5345 case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
5346 rc = lpfc_forced_link_speed(job);
5347 break;
5230 default: 5348 default:
5231 rc = -EINVAL; 5349 rc = -EINVAL;
5232 job->reply->reply_payload_rcv_len = 0; 5350 bsg_reply->reply_payload_rcv_len = 0;
5233 /* make error code available to userspace */ 5351 /* make error code available to userspace */
5234 job->reply->result = rc; 5352 bsg_reply->result = rc;
5235 break; 5353 break;
5236 } 5354 }
5237 5355
@@ -5243,12 +5361,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
5243 * @job: fc_bsg_job to handle 5361 * @job: fc_bsg_job to handle
5244 **/ 5362 **/
5245int 5363int
5246lpfc_bsg_request(struct fc_bsg_job *job) 5364lpfc_bsg_request(struct bsg_job *job)
5247{ 5365{
5366 struct fc_bsg_request *bsg_request = job->request;
5367 struct fc_bsg_reply *bsg_reply = job->reply;
5248 uint32_t msgcode; 5368 uint32_t msgcode;
5249 int rc; 5369 int rc;
5250 5370
5251 msgcode = job->request->msgcode; 5371 msgcode = bsg_request->msgcode;
5252 switch (msgcode) { 5372 switch (msgcode) {
5253 case FC_BSG_HST_VENDOR: 5373 case FC_BSG_HST_VENDOR:
5254 rc = lpfc_bsg_hst_vendor(job); 5374 rc = lpfc_bsg_hst_vendor(job);
@@ -5261,9 +5381,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
5261 break; 5381 break;
5262 default: 5382 default:
5263 rc = -EINVAL; 5383 rc = -EINVAL;
5264 job->reply->reply_payload_rcv_len = 0; 5384 bsg_reply->reply_payload_rcv_len = 0;
5265 /* make error code available to userspace */ 5385 /* make error code available to userspace */
5266 job->reply->result = rc; 5386 bsg_reply->result = rc;
5267 break; 5387 break;
5268 } 5388 }
5269 5389
@@ -5278,9 +5398,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
5278 * the waiting function which will handle passing the error back to userspace 5398 * the waiting function which will handle passing the error back to userspace
5279 **/ 5399 **/
5280int 5400int
5281lpfc_bsg_timeout(struct fc_bsg_job *job) 5401lpfc_bsg_timeout(struct bsg_job *job)
5282{ 5402{
5283 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 5403 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5284 struct lpfc_hba *phba = vport->phba; 5404 struct lpfc_hba *phba = vport->phba;
5285 struct lpfc_iocbq *cmdiocb; 5405 struct lpfc_iocbq *cmdiocb;
5286 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 5406 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index e557bcdbcb19..f2247aa4fa17 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -35,6 +35,7 @@
35#define LPFC_BSG_VENDOR_MENLO_DATA 9 35#define LPFC_BSG_VENDOR_MENLO_DATA 9
36#define LPFC_BSG_VENDOR_DIAG_MODE_END 10 36#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
37#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11 37#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
38#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
38 39
39struct set_ct_event { 40struct set_ct_event {
40 uint32_t command; 41 uint32_t command;
@@ -284,6 +285,15 @@ struct lpfc_sli_config_mbox {
284 } un; 285 } un;
285}; 286};
286 287
288#define LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED 0
289#define LPFC_FORCED_LINK_SPEED_SUPPORTED 1
290struct get_forced_link_speed_support {
291 uint32_t command;
292};
293struct forced_link_speed_support_reply {
294 uint8_t supported;
295};
296
287/* driver only */ 297/* driver only */
288#define SLI_CONFIG_NOT_HANDLED 0 298#define SLI_CONFIG_NOT_HANDLED 0
289#define SLI_CONFIG_HANDLED 1 299#define SLI_CONFIG_HANDLED 1
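For context, the new vendor opcode is reached through the standard FC bsg vendor framing: a struct fc_bsg_request whose vendor_cmd[0] carries the LPFC_BSG_VENDOR_* value (14 here, per lpfc_bsg.h above), answered through reply_data.vendor_reply.vendor_rsp. A hypothetical userspace sketch; the sg_io_v4 submission through the bsg node is omitted, and everything beyond the uapi structs is illustrative:

    #include <string.h>
    #include <scsi/scsi_bsg_fc.h>  /* struct fc_bsg_request, FC_BSG_HST_VENDOR */

    /* request_len must cover the header plus the one-word vendor command,
     * matching the driver's minimum-size check */
    unsigned char rqst[sizeof(struct fc_bsg_request) + sizeof(__u32)];
    struct fc_bsg_request *bsg_request = (struct fc_bsg_request *)rqst;

    memset(rqst, 0, sizeof(rqst));
    bsg_request->msgcode = FC_BSG_HST_VENDOR;
    bsg_request->rqst_data.h_vendor.vendor_cmd[0] =
                    LPFC_BSG_VENDOR_FORCED_LINK_SPEED;  /* value 14 */

    /* on success the driver places a forced_link_speed_support_reply in
     * the reply's vendor_rsp area: supported is 1 if the HBA latched
     * HBA_FORCED_LINK_SPEED at read_config time, 0 otherwise */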
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bd7576d452f2..15d2bfdf582d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -397,8 +397,6 @@ extern spinlock_t _dump_buf_lock;
397extern int _dump_buf_done; 397extern int _dump_buf_done;
398extern spinlock_t pgcnt_lock; 398extern spinlock_t pgcnt_lock;
399extern unsigned int pgcnt; 399extern unsigned int pgcnt;
400extern unsigned int lpfc_prot_mask;
401extern unsigned char lpfc_prot_guard;
402extern unsigned int lpfc_fcp_look_ahead; 400extern unsigned int lpfc_fcp_look_ahead;
403 401
404/* Interface exported by fabric iocb scheduler */ 402/* Interface exported by fabric iocb scheduler */
@@ -431,8 +429,8 @@ struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
431#define HBA_EVENT_LINK_DOWN 3 429#define HBA_EVENT_LINK_DOWN 3
432 430
433/* functions to support SGIOv4/bsg interface */ 431/* functions to support SGIOv4/bsg interface */
434int lpfc_bsg_request(struct fc_bsg_job *); 432int lpfc_bsg_request(struct bsg_job *);
435int lpfc_bsg_timeout(struct fc_bsg_job *); 433int lpfc_bsg_timeout(struct bsg_job *);
436int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 434int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
437 struct lpfc_iocbq *); 435 struct lpfc_iocbq *);
438int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); 436int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b7d54bfb1df9..236e4e51d161 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7610,7 +7610,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7610 /* reject till our FLOGI completes */ 7610 /* reject till our FLOGI completes */
7611 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 7611 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
7612 (cmd != ELS_CMD_FLOGI)) { 7612 (cmd != ELS_CMD_FLOGI)) {
7613 rjt_err = LSRJT_UNABLE_TPC; 7613 rjt_err = LSRJT_LOGICAL_BSY;
7614 rjt_exp = LSEXP_NOTHING_MORE; 7614 rjt_exp = LSEXP_NOTHING_MORE;
7615 goto lsrjt; 7615 goto lsrjt;
7616 } 7616 }
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index ee8022737591..5646699b0516 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -921,6 +921,7 @@ struct mbox_header {
921#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D 921#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
922#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A 922#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
923#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B 923#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
924#define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D
924#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73 925#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
925#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74 926#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
926#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A 927#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
@@ -2289,6 +2290,9 @@ struct lpfc_mbx_read_config {
2289#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 2290#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
2290#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF 2291#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
2291#define lpfc_mbx_rd_conf_r_a_tov_WORD word6 2292#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
2293#define lpfc_mbx_rd_conf_link_speed_SHIFT 16
2294#define lpfc_mbx_rd_conf_link_speed_MASK 0x0000FFFF
2295#define lpfc_mbx_rd_conf_link_speed_WORD word6
2292 uint32_t rsvd_7; 2296 uint32_t rsvd_7;
2293 uint32_t rsvd_8; 2297 uint32_t rsvd_8;
2294 uint32_t word9; 2298 uint32_t word9;
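The SHIFT/MASK/WORD triplet feeds lpfc's bitfield accessor macros; bf_get() is roughly the following, so the new read_config consumer extracts the upper half of word6:

    /* paraphrase of lpfc_hw4.h's accessor */
    #define bf_get(name, ptr) \
            (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

    forced_link_speed = bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
    /* expands to: (rd_config->word6 >> 16) & 0xFFFF */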
@@ -2919,6 +2923,16 @@ struct lpfc_mbx_set_feature {
2919}; 2923};
2920 2924
2921 2925
2926#define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2
2927struct lpfc_mbx_set_host_data {
2928#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48
2929 struct mbox_header header;
2930 uint32_t param_id;
2931 uint32_t param_len;
2932 uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
2933};
2934
2935
2922struct lpfc_mbx_get_sli4_parameters { 2936struct lpfc_mbx_get_sli4_parameters {
2923 struct mbox_header header; 2937 struct mbox_header header;
2924 struct lpfc_sli4_parameters sli4_parameters; 2938 struct lpfc_sli4_parameters sli4_parameters;
@@ -3313,6 +3327,7 @@ struct lpfc_mqe {
3313 struct lpfc_mbx_get_port_name get_port_name; 3327 struct lpfc_mbx_get_port_name get_port_name;
3314 struct lpfc_mbx_set_feature set_feature; 3328 struct lpfc_mbx_set_feature set_feature;
3315 struct lpfc_mbx_memory_dump_type3 mem_dump_type3; 3329 struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
3330 struct lpfc_mbx_set_host_data set_host_data;
3316 struct lpfc_mbx_nop nop; 3331 struct lpfc_mbx_nop nop;
3317 } un; 3332 } un;
3318}; 3333};
@@ -3981,7 +3996,8 @@ union lpfc_wqe128 {
3981 struct gen_req64_wqe gen_req; 3996 struct gen_req64_wqe gen_req;
3982}; 3997};
3983 3998
3984#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001 3999#define LPFC_GROUP_OJECT_MAGIC_G5 0xfeaa0001
4000#define LPFC_GROUP_OJECT_MAGIC_G6 0xfeaa0003
3985#define LPFC_FILE_TYPE_GROUP 0xf7 4001#define LPFC_FILE_TYPE_GROUP 0xf7
3986#define LPFC_FILE_ID_GROUP 0xa2 4002#define LPFC_FILE_ID_GROUP 0xa2
3987struct lpfc_grp_hdr { 4003struct lpfc_grp_hdr {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 734a0428ef0e..4776fd85514f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6279,34 +6279,36 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
6279 uint32_t old_guard; 6279 uint32_t old_guard;
6280 6280
6281 int pagecnt = 10; 6281 int pagecnt = 10;
6282 if (lpfc_prot_mask && lpfc_prot_guard) { 6282 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
6283 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6283 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6284 "1478 Registering BlockGuard with the " 6284 "1478 Registering BlockGuard with the "
6285 "SCSI layer\n"); 6285 "SCSI layer\n");
6286 6286
6287 old_mask = lpfc_prot_mask; 6287 old_mask = phba->cfg_prot_mask;
6288 old_guard = lpfc_prot_guard; 6288 old_guard = phba->cfg_prot_guard;
6289 6289
6290 /* Only allow supported values */ 6290 /* Only allow supported values */
6291 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 6291 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
6292 SHOST_DIX_TYPE0_PROTECTION | 6292 SHOST_DIX_TYPE0_PROTECTION |
6293 SHOST_DIX_TYPE1_PROTECTION); 6293 SHOST_DIX_TYPE1_PROTECTION);
6294 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); 6294 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
6295 SHOST_DIX_GUARD_CRC);
6295 6296
6296 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 6297 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
6297 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 6298 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
6298 lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 6299 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
6299 6300
6300 if (lpfc_prot_mask && lpfc_prot_guard) { 6301 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
6301 if ((old_mask != lpfc_prot_mask) || 6302 if ((old_mask != phba->cfg_prot_mask) ||
6302 (old_guard != lpfc_prot_guard)) 6303 (old_guard != phba->cfg_prot_guard))
6303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6304 "1475 Registering BlockGuard with the " 6305 "1475 Registering BlockGuard with the "
6305 "SCSI layer: mask %d guard %d\n", 6306 "SCSI layer: mask %d guard %d\n",
6306 lpfc_prot_mask, lpfc_prot_guard); 6307 phba->cfg_prot_mask,
6308 phba->cfg_prot_guard);
6307 6309
6308 scsi_host_set_prot(shost, lpfc_prot_mask); 6310 scsi_host_set_prot(shost, phba->cfg_prot_mask);
6309 scsi_host_set_guard(shost, lpfc_prot_guard); 6311 scsi_host_set_guard(shost, phba->cfg_prot_guard);
6310 } else 6312 } else
6311 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6313 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6312 "1479 Not Registering BlockGuard with the SCSI " 6314 "1479 Not Registering BlockGuard with the SCSI "
@@ -6929,6 +6931,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6929 struct lpfc_mbx_get_func_cfg *get_func_cfg; 6931 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6930 struct lpfc_rsrc_desc_fcfcoe *desc; 6932 struct lpfc_rsrc_desc_fcfcoe *desc;
6931 char *pdesc_0; 6933 char *pdesc_0;
6934 uint16_t forced_link_speed;
6935 uint32_t if_type;
6932 int length, i, rc = 0, rc2; 6936 int length, i, rc = 0, rc2;
6933 6937
6934 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6938 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -7022,6 +7026,58 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
7022 if (rc) 7026 if (rc)
7023 goto read_cfg_out; 7027 goto read_cfg_out;
7024 7028
7029 /* Update link speed if forced link speed is supported */
7030 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7031 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7032 forced_link_speed =
7033 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
7034 if (forced_link_speed) {
7035 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
7036
7037 switch (forced_link_speed) {
7038 case LINK_SPEED_1G:
7039 phba->cfg_link_speed =
7040 LPFC_USER_LINK_SPEED_1G;
7041 break;
7042 case LINK_SPEED_2G:
7043 phba->cfg_link_speed =
7044 LPFC_USER_LINK_SPEED_2G;
7045 break;
7046 case LINK_SPEED_4G:
7047 phba->cfg_link_speed =
7048 LPFC_USER_LINK_SPEED_4G;
7049 break;
7050 case LINK_SPEED_8G:
7051 phba->cfg_link_speed =
7052 LPFC_USER_LINK_SPEED_8G;
7053 break;
7054 case LINK_SPEED_10G:
7055 phba->cfg_link_speed =
7056 LPFC_USER_LINK_SPEED_10G;
7057 break;
7058 case LINK_SPEED_16G:
7059 phba->cfg_link_speed =
7060 LPFC_USER_LINK_SPEED_16G;
7061 break;
7062 case LINK_SPEED_32G:
7063 phba->cfg_link_speed =
7064 LPFC_USER_LINK_SPEED_32G;
7065 break;
7066 case 0xffff:
7067 phba->cfg_link_speed =
7068 LPFC_USER_LINK_SPEED_AUTO;
7069 break;
7070 default:
7071 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7072 "0047 Unrecognized link "
7073 "speed : %d\n",
7074 forced_link_speed);
7075 phba->cfg_link_speed =
7076 LPFC_USER_LINK_SPEED_AUTO;
7077 }
7078 }
7079 }
7080
7025 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 7081 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
7026 length = phba->sli4_hba.max_cfg_param.max_xri - 7082 length = phba->sli4_hba.max_cfg_param.max_xri -
7027 lpfc_sli4_get_els_iocb_cnt(phba); 7083 lpfc_sli4_get_els_iocb_cnt(phba);
@@ -7256,6 +7312,7 @@ int
7256lpfc_sli4_queue_create(struct lpfc_hba *phba) 7312lpfc_sli4_queue_create(struct lpfc_hba *phba)
7257{ 7313{
7258 struct lpfc_queue *qdesc; 7314 struct lpfc_queue *qdesc;
7315 uint32_t wqesize;
7259 int idx; 7316 int idx;
7260 7317
7261 /* 7318 /*
@@ -7340,15 +7397,10 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
7340 phba->sli4_hba.fcp_cq[idx] = qdesc; 7397 phba->sli4_hba.fcp_cq[idx] = qdesc;
7341 7398
7342 /* Create Fast Path FCP WQs */ 7399 /* Create Fast Path FCP WQs */
7343 if (phba->fcp_embed_io) { 7400 wqesize = (phba->fcp_embed_io) ?
7344 qdesc = lpfc_sli4_queue_alloc(phba, 7401 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
7345 LPFC_WQE128_SIZE, 7402 qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
7346 LPFC_WQE128_DEF_COUNT); 7403 phba->sli4_hba.wq_ecount);
7347 } else {
7348 qdesc = lpfc_sli4_queue_alloc(phba,
7349 phba->sli4_hba.wq_esize,
7350 phba->sli4_hba.wq_ecount);
7351 }
7352 if (!qdesc) { 7404 if (!qdesc) {
7353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7354 "0503 Failed allocate fast-path FCP " 7406 "0503 Failed allocate fast-path FCP "
@@ -10260,6 +10312,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
10260 int i, rc = 0; 10312 int i, rc = 0;
10261 struct lpfc_dmabuf *dmabuf, *next; 10313 struct lpfc_dmabuf *dmabuf, *next;
10262 uint32_t offset = 0, temp_offset = 0; 10314 uint32_t offset = 0, temp_offset = 0;
10315 uint32_t magic_number, ftype, fid, fsize;
10263 10316
10264 /* It can be null in no-wait mode, sanity check */ 10317 /* It can be null in no-wait mode, sanity check */
10265 if (!fw) { 10318 if (!fw) {
@@ -10268,18 +10321,19 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
10268 } 10321 }
10269 image = (struct lpfc_grp_hdr *)fw->data; 10322 image = (struct lpfc_grp_hdr *)fw->data;
10270 10323
10324 magic_number = be32_to_cpu(image->magic_number);
10325 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
10326 fid = bf_get_be32(lpfc_grp_hdr_id, image);
10327 fsize = be32_to_cpu(image->size);
10328
10271 INIT_LIST_HEAD(&dma_buffer_list); 10329 INIT_LIST_HEAD(&dma_buffer_list);
10272 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || 10330 if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
10273 (bf_get_be32(lpfc_grp_hdr_file_type, image) != 10331 magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
10274 LPFC_FILE_TYPE_GROUP) || 10332 ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
10275 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
10276 (be32_to_cpu(image->size) != fw->size)) {
10277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10333 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10278 "3022 Invalid FW image found. " 10334 "3022 Invalid FW image found. "
10279 "Magic:%x Type:%x ID:%x\n", 10335 "Magic:%x Type:%x ID:%x Size %d %zd\n",
10280 be32_to_cpu(image->magic_number), 10336 magic_number, ftype, fid, fsize, fw->size);
10281 bf_get_be32(lpfc_grp_hdr_file_type, image),
10282 bf_get_be32(lpfc_grp_hdr_id, image));
10283 rc = -EINVAL; 10337 rc = -EINVAL;
10284 goto release_out; 10338 goto release_out;
10285 } 10339 }
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d197aa176dee..ad350d969bdc 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -413,15 +413,13 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
413 * struct fcp_cmnd, struct fcp_rsp and the number of bde's 413 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
414 * necessary to support the sg_tablesize. 414 * necessary to support the sg_tablesize.
415 */ 415 */
416 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, 416 psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
417 GFP_KERNEL, &psb->dma_handle); 417 GFP_KERNEL, &psb->dma_handle);
418 if (!psb->data) { 418 if (!psb->data) {
419 kfree(psb); 419 kfree(psb);
420 break; 420 break;
421 } 421 }
422 422
423 /* Initialize virtual ptrs to dma_buf region. */
424 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
425 423
426 /* Allocate iotag for psb->cur_iocbq. */ 424 /* Allocate iotag for psb->cur_iocbq. */
427 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 425 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
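pci_pool_zalloc() (a thin wrapper around dma_pool_zalloc() in this era) folds the allocate-then-memset pair into one call and spares the call site from knowing the pool's element size. The equivalence, as a sketch with an illustrative elem_size:

    void *buf;
    dma_addr_t dma;

    /* before: two steps, and cfg_sg_dma_buf_size had to be in scope */
    buf = pci_pool_alloc(pool, GFP_KERNEL, &dma);
    if (buf)
            memset(buf, 0, elem_size);

    /* after: one call, zeroing handled by the pool layer */
    buf = pci_pool_zalloc(pool, GFP_KERNEL, &dma);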
@@ -607,7 +605,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
607} 605}
608 606
609/** 607/**
610 * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list 608 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
611 * @phba: pointer to lpfc hba data structure. 609 * @phba: pointer to lpfc hba data structure.
612 * @post_sblist: pointer to the scsi buffer list. 610 * @post_sblist: pointer to the scsi buffer list.
613 * 611 *
@@ -736,7 +734,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
736} 734}
737 735
738/** 736/**
739 * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls 737 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
740 * @phba: pointer to lpfc hba data structure. 738 * @phba: pointer to lpfc hba data structure.
741 * 739 *
742 * This routine walks the list of scsi buffers that have been allocated and 740 * This routine walks the list of scsi buffers that have been allocated and
@@ -821,13 +819,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
821 * for the struct fcp_cmnd, struct fcp_rsp and the number 819 * for the struct fcp_cmnd, struct fcp_rsp and the number
822 * of bde's necessary to support the sg_tablesize. 820 * of bde's necessary to support the sg_tablesize.
823 */ 821 */
824 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, 822 psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
825 GFP_KERNEL, &psb->dma_handle); 823 GFP_KERNEL, &psb->dma_handle);
826 if (!psb->data) { 824 if (!psb->data) {
827 kfree(psb); 825 kfree(psb);
828 break; 826 break;
829 } 827 }
830 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
831 828
832 /* 829 /*
833 * 4K Page alignment is CRITICAL to BlockGuard, double check 830 * 4K Page alignment is CRITICAL to BlockGuard, double check
@@ -857,7 +854,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
857 psb->data, psb->dma_handle); 854 psb->data, psb->dma_handle);
858 kfree(psb); 855 kfree(psb);
859 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 856 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
860 "3368 Failed to allocated IOTAG for" 857 "3368 Failed to allocate IOTAG for"
861 " XRI:0x%x\n", lxri); 858 " XRI:0x%x\n", lxri);
862 lpfc_sli4_free_xri(phba, lxri); 859 lpfc_sli4_free_xri(phba, lxri);
863 break; 860 break;
@@ -1136,7 +1133,7 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1136 * 1133 *
1137 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 1134 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1138 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans 1135 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
1139 * through sg elements and format the bdea. This routine also initializes all 1136 * through sg elements and format the bde. This routine also initializes all
1140 * IOCB fields which are dependent on scsi command request buffer. 1137 * IOCB fields which are dependent on scsi command request buffer.
1141 * 1138 *
1142 * Return codes: 1139 * Return codes:
@@ -1269,13 +1266,16 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1269 1266
1270#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1267#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1271 1268
1272/* Return if if error injection is detected by Initiator */ 1269/* Return BG_ERR_INIT if error injection is detected by Initiator */
1273#define BG_ERR_INIT 0x1 1270#define BG_ERR_INIT 0x1
1274/* Return if if error injection is detected by Target */ 1271/* Return BG_ERR_TGT if error injection is detected by Target */
1275#define BG_ERR_TGT 0x2 1272#define BG_ERR_TGT 0x2
1276/* Return if if swapping CSUM<-->CRC is required for error injection */ 1273/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
1277#define BG_ERR_SWAP 0x10 1274#define BG_ERR_SWAP 0x10
1278/* Return if disabling Guard/Ref/App checking is required for error injection */ 1275/**
1276 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
1277 * error injection
1278 **/
1279#define BG_ERR_CHECK 0x20 1279#define BG_ERR_CHECK 0x20
1280 1280
1281/** 1281/**
@@ -4139,13 +4139,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4139 4139
4140 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4140 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4141 4141
4142 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4143 cmd->scsi_done(cmd);
4144
4145 spin_lock_irqsave(&phba->hbalock, flags); 4142 spin_lock_irqsave(&phba->hbalock, flags);
4146 lpfc_cmd->pCmd = NULL; 4143 lpfc_cmd->pCmd = NULL;
4147 spin_unlock_irqrestore(&phba->hbalock, flags); 4144 spin_unlock_irqrestore(&phba->hbalock, flags);
4148 4145
4146 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4147 cmd->scsi_done(cmd);
4148
4149 /* 4149 /*
4150 * If there is a thread waiting for command completion 4150 * If there is a thread waiting for command completion
4151 * wake up the thread. 4151 * wake up the thread.
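
The reorder closes a small window: scsi_done() may be the midlayer's cue to reuse or free the command, so the driver clears its own reference (pCmd) under hbalock first and only then completes the command. Sketched:

	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_cmd->pCmd = NULL;		/* driver no longer owns cmd */
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* sdev is not guaranteed valid after this upcall */
	cmd->scsi_done(cmd);
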
@@ -4822,7 +4822,7 @@ wait_for_cmpl:
4822 ret = FAILED; 4822 ret = FAILED;
4823 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 4823 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4824 "0748 abort handler timed out waiting " 4824 "0748 abort handler timed out waiting "
4825 "for abortng I/O (xri:x%x) to complete: " 4825 "for aborting I/O (xri:x%x) to complete: "
4826 "ret %#x, ID %d, LUN %llu\n", 4826 "ret %#x, ID %d, LUN %llu\n",
4827 iocb->sli4_xritag, ret, 4827 iocb->sli4_xritag, ret,
4828 cmnd->device->id, cmnd->device->lun); 4828 cmnd->device->id, cmnd->device->lun);
@@ -4945,26 +4945,30 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
4945 * 0x2002 - Success. 4945 * 0x2002 - Success.
4946 **/ 4946 **/
4947static int 4947static int
4948lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, 4948lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
4949 unsigned tgt_id, uint64_t lun_id, 4949 unsigned int tgt_id, uint64_t lun_id,
4950 uint8_t task_mgmt_cmd) 4950 uint8_t task_mgmt_cmd)
4951{ 4951{
4952 struct lpfc_hba *phba = vport->phba; 4952 struct lpfc_hba *phba = vport->phba;
4953 struct lpfc_scsi_buf *lpfc_cmd; 4953 struct lpfc_scsi_buf *lpfc_cmd;
4954 struct lpfc_iocbq *iocbq; 4954 struct lpfc_iocbq *iocbq;
4955 struct lpfc_iocbq *iocbqrsp; 4955 struct lpfc_iocbq *iocbqrsp;
4956 struct lpfc_nodelist *pnode = rdata->pnode; 4956 struct lpfc_rport_data *rdata;
4957 struct lpfc_nodelist *pnode;
4957 int ret; 4958 int ret;
4958 int status; 4959 int status;
4959 4960
4960 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 4961 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4962 if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
4961 return FAILED; 4963 return FAILED;
4964 pnode = rdata->pnode;
4962 4965
4963 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); 4966 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
4964 if (lpfc_cmd == NULL) 4967 if (lpfc_cmd == NULL)
4965 return FAILED; 4968 return FAILED;
4966 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; 4969 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
4967 lpfc_cmd->rdata = rdata; 4970 lpfc_cmd->rdata = rdata;
4971 lpfc_cmd->pCmd = cmnd;
4968 4972
4969 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 4973 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
4970 task_mgmt_cmd); 4974 task_mgmt_cmd);
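
Taking the scsi_cmnd instead of a caller-supplied lpfc_rport_data means the task-management path resolves the rport data itself, at issue time, and validates the node before using it; the callers below now simply pass cmnd through. The prologue pattern, as a sketch:

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;	/* remote port gone or node inactive */
	pnode = rdata->pnode;
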
@@ -5171,7 +5175,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5171 fc_host_post_vendor_event(shost, fc_get_event_number(), 5175 fc_host_post_vendor_event(shost, fc_get_event_number(),
5172 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 5176 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5173 5177
5174 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, 5178 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5175 FCP_LUN_RESET); 5179 FCP_LUN_RESET);
5176 5180
5177 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5181 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5249,7 +5253,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5249 fc_host_post_vendor_event(shost, fc_get_event_number(), 5253 fc_host_post_vendor_event(shost, fc_get_event_number(),
5250 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 5254 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5251 5255
5252 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, 5256 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5253 FCP_TARGET_RESET); 5257 FCP_TARGET_RESET);
5254 5258
5255 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5259 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5328,7 +5332,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5328 if (!match) 5332 if (!match)
5329 continue; 5333 continue;
5330 5334
5331 status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data, 5335 status = lpfc_send_taskmgmt(vport, cmnd,
5332 i, 0, FCP_TARGET_RESET); 5336 i, 0, FCP_TARGET_RESET);
5333 5337
5334 if (status != SUCCESS) { 5338 if (status != SUCCESS) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f4f77c5b0c83..4faa7672fc1d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -47,6 +47,7 @@
47#include "lpfc_compat.h" 47#include "lpfc_compat.h"
48#include "lpfc_debugfs.h" 48#include "lpfc_debugfs.h"
49#include "lpfc_vport.h" 49#include "lpfc_vport.h"
50#include "lpfc_version.h"
50 51
51/* There are only four IOCB completion types. */ 52/* There are only four IOCB completion types. */
52typedef enum _lpfc_iocb_type { 53typedef enum _lpfc_iocb_type {
@@ -2678,15 +2679,16 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2678 2679
2679 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2680 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2680 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2681 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2681 list_del_init(&cmd_iocb->list);
2682 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2682 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2683 /* remove from txcmpl queue list */
2684 list_del_init(&cmd_iocb->list);
2683 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2685 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2686 return cmd_iocb;
2684 } 2687 }
2685 return cmd_iocb;
2686 } 2688 }
2687 2689
2688 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2690 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2689 "0317 iotag x%x is out off " 2691 "0317 iotag x%x is out of "
2690 "range: max iotag x%x wd0 x%x\n", 2692 "range: max iotag x%x wd0 x%x\n",
2691 iotag, phba->sli.last_iotag, 2693 iotag, phba->sli.last_iotag,
2692 *(((uint32_t *) &prspiocb->iocb) + 7)); 2694 *(((uint32_t *) &prspiocb->iocb) + 7));
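
Previously the looked-up iocb was unconditionally unlinked; if a stale or duplicate completion arrived for an iotag that was no longer on the txcmplq, that list_del_init() touched a list the entry did not belong to. The fix unlinks only when LPFC_IO_ON_TXCMPLQ is set and otherwise falls through to the out-of-range log:

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* genuinely outstanding: unlink and return it */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			return cmd_iocb;
		}
	}
	/* stale iotag: falls through to the "0317" error message */
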
@@ -2721,8 +2723,9 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2721 return cmd_iocb; 2723 return cmd_iocb;
2722 } 2724 }
2723 } 2725 }
2726
2724 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2725 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2728 "0372 iotag x%x is out of range: max iotag (x%x)\n",
2726 iotag, phba->sli.last_iotag); 2729 iotag, phba->sli.last_iotag);
2727 return NULL; 2730 return NULL;
2728} 2731}
@@ -6291,6 +6294,25 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6291 return 0; 6294 return 0;
6292} 6295}
6293 6296
6297void
6298lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6299{
6300 uint32_t len;
6301
6302 len = sizeof(struct lpfc_mbx_set_host_data) -
6303 sizeof(struct lpfc_sli4_cfg_mhdr);
6304 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6305 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6306 LPFC_SLI4_MBX_EMBED);
6307
6308 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
6309 mbox->u.mqe.un.set_host_data.param_len = 8;
6310 snprintf(mbox->u.mqe.un.set_host_data.data,
6311 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6312 "Linux %s v"LPFC_DRIVER_VERSION,
6313 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
6314}
6315
6294/** 6316/**
6295 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 6317 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
6296 * @phba: Pointer to HBA context object. 6318 * @phba: Pointer to HBA context object.
@@ -6542,6 +6564,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6542 goto out_free_mbox; 6564 goto out_free_mbox;
6543 } 6565 }
6544 6566
6567 lpfc_set_host_data(phba, mboxq);
6568
6569 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6570 if (rc) {
6571 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6572 "2134 Failed to set host os driver version %x",
6573 rc);
6574 }
6575
6545 /* Read the port's service parameters. */ 6576 /* Read the port's service parameters. */
6546 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6577 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6547 if (rc) { 6578 if (rc) {
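
lpfc_set_host_data() fills a COMMON_SET_HOST_DATA mailbox with a driver-identification string so the firmware can record what is driving the port; a failure is only logged, never fatal. Because of C string pasting, the format "Linux %s v"LPFC_DRIVER_VERSION produces, for example:

	/* HBA in FC mode, this release: */
	"Linux FC v11.2.0.2"
	/* HBA_FCOE_MODE set: */
	"Linux FCoE v11.2.0.2"

both bounded to LPFC_HOST_OS_DRIVER_VERSION_SIZE by snprintf().
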
@@ -11781,6 +11812,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11781 /* Look up the ELS command IOCB and create pseudo response IOCB */ 11812 /* Look up the ELS command IOCB and create pseudo response IOCB */
11782 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11813 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11783 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11814 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11815 /* Put the iocb back on the txcmplq */
11816 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
11784 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11817 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11785 11818
11786 if (unlikely(!cmdiocbq)) { 11819 if (unlikely(!cmdiocbq)) {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c9bf20eb7223..50bfc43ebcb0 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "11.2.0.0." 21#define LPFC_DRIVER_VERSION "11.2.0.2"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index a590089b9397..ccb68d12692c 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -28,17 +28,15 @@
28 28
29/* Definitions for the core NCR5380 driver. */ 29/* Definitions for the core NCR5380 driver. */
30 30
31#define NCR5380_implementation_fields unsigned char *pdma_base; \ 31#define NCR5380_implementation_fields int pdma_residual
32 int pdma_residual
33 32
34#define NCR5380_read(reg) macscsi_read(instance, reg) 33#define NCR5380_read(reg) in_8(hostdata->io + ((reg) << 4))
35#define NCR5380_write(reg, value) macscsi_write(instance, reg, value) 34#define NCR5380_write(reg, value) out_8(hostdata->io + ((reg) << 4), value)
36 35
37#define NCR5380_dma_xfer_len(instance, cmd, phase) \ 36#define NCR5380_dma_xfer_len macscsi_dma_xfer_len
38 macscsi_dma_xfer_len(instance, cmd)
39#define NCR5380_dma_recv_setup macscsi_pread 37#define NCR5380_dma_recv_setup macscsi_pread
40#define NCR5380_dma_send_setup macscsi_pwrite 38#define NCR5380_dma_send_setup macscsi_pwrite
41#define NCR5380_dma_residual(instance) (hostdata->pdma_residual) 39#define NCR5380_dma_residual macscsi_dma_residual
42 40
43#define NCR5380_intr macscsi_intr 41#define NCR5380_intr macscsi_intr
44#define NCR5380_queue_command macscsi_queue_command 42#define NCR5380_queue_command macscsi_queue_command
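
This series changes the NCR5380 core to pass struct NCR5380_hostdata * straight into the board hooks, so mac_scsi no longer needs wrapper functions that fish hostdata out of a Scsi_Host: register access becomes a macro over hostdata->io (one register every 16 bytes on this board), and the pseudo-DMA window moves to hostdata->pdma_io. The probe path just fills those fields in, roughly:

	hostdata = shost_priv(instance);
	hostdata->io = (void *)pio_mem->start;	/* PIO register window */
	if (pdma_mem && setup_use_pdma)
		hostdata->pdma_io = (void *)pdma_mem->start;
	else
		host_flags |= FLAG_NO_PSEUDO_DMA;  /* fall back to PIO */
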
@@ -61,20 +59,6 @@ module_param(setup_hostid, int, 0);
61static int setup_toshiba_delay = -1; 59static int setup_toshiba_delay = -1;
62module_param(setup_toshiba_delay, int, 0); 60module_param(setup_toshiba_delay, int, 0);
63 61
64/*
65 * NCR 5380 register access functions
66 */
67
68static inline char macscsi_read(struct Scsi_Host *instance, int reg)
69{
70 return in_8(instance->base + (reg << 4));
71}
72
73static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value)
74{
75 out_8(instance->base + (reg << 4), value);
76}
77
78#ifndef MODULE 62#ifndef MODULE
79static int __init mac_scsi_setup(char *str) 63static int __init mac_scsi_setup(char *str)
80{ 64{
@@ -167,16 +151,15 @@ __asm__ __volatile__ \
167 : "0"(s), "1"(d), "2"(n) \ 151 : "0"(s), "1"(d), "2"(n) \
168 : "d0") 152 : "d0")
169 153
170static int macscsi_pread(struct Scsi_Host *instance, 154static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
171 unsigned char *dst, int len) 155 unsigned char *dst, int len)
172{ 156{
173 struct NCR5380_hostdata *hostdata = shost_priv(instance); 157 unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
174 unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
175 unsigned char *d = dst; 158 unsigned char *d = dst;
176 int n = len; 159 int n = len;
177 int transferred; 160 int transferred;
178 161
179 while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, 162 while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
180 BASR_DRQ | BASR_PHASE_MATCH, 163 BASR_DRQ | BASR_PHASE_MATCH,
181 BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { 164 BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
182 CP_IO_TO_MEM(s, d, n); 165 CP_IO_TO_MEM(s, d, n);
@@ -189,23 +172,23 @@ static int macscsi_pread(struct Scsi_Host *instance,
189 return 0; 172 return 0;
190 173
191 /* Target changed phase early? */ 174 /* Target changed phase early? */
192 if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ, 175 if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
193 BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) 176 BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
194 scmd_printk(KERN_ERR, hostdata->connected, 177 scmd_printk(KERN_ERR, hostdata->connected,
195 "%s: !REQ and !ACK\n", __func__); 178 "%s: !REQ and !ACK\n", __func__);
196 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) 179 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
197 return 0; 180 return 0;
198 181
199 dsprintk(NDEBUG_PSEUDO_DMA, instance, 182 dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
200 "%s: bus error (%d/%d)\n", __func__, transferred, len); 183 "%s: bus error (%d/%d)\n", __func__, transferred, len);
201 NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); 184 NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
202 d = dst + transferred; 185 d = dst + transferred;
203 n = len - transferred; 186 n = len - transferred;
204 } 187 }
205 188
206 scmd_printk(KERN_ERR, hostdata->connected, 189 scmd_printk(KERN_ERR, hostdata->connected,
207 "%s: phase mismatch or !DRQ\n", __func__); 190 "%s: phase mismatch or !DRQ\n", __func__);
208 NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); 191 NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
209 return -1; 192 return -1;
210} 193}
211 194
@@ -270,16 +253,15 @@ __asm__ __volatile__ \
270 : "0"(s), "1"(d), "2"(n) \ 253 : "0"(s), "1"(d), "2"(n) \
271 : "d0") 254 : "d0")
272 255
273static int macscsi_pwrite(struct Scsi_Host *instance, 256static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
274 unsigned char *src, int len) 257 unsigned char *src, int len)
275{ 258{
276 struct NCR5380_hostdata *hostdata = shost_priv(instance);
277 unsigned char *s = src; 259 unsigned char *s = src;
278 unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4); 260 unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
279 int n = len; 261 int n = len;
280 int transferred; 262 int transferred;
281 263
282 while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, 264 while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
283 BASR_DRQ | BASR_PHASE_MATCH, 265 BASR_DRQ | BASR_PHASE_MATCH,
284 BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { 266 BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
285 CP_MEM_TO_IO(s, d, n); 267 CP_MEM_TO_IO(s, d, n);
@@ -288,7 +270,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
288 hostdata->pdma_residual = len - transferred; 270 hostdata->pdma_residual = len - transferred;
289 271
290 /* Target changed phase early? */ 272 /* Target changed phase early? */
291 if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ, 273 if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
292 BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) 274 BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
293 scmd_printk(KERN_ERR, hostdata->connected, 275 scmd_printk(KERN_ERR, hostdata->connected,
294 "%s: !REQ and !ACK\n", __func__); 276 "%s: !REQ and !ACK\n", __func__);
@@ -297,7 +279,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
297 279
298 /* No bus error. */ 280 /* No bus error. */
299 if (n == 0) { 281 if (n == 0) {
300 if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG, 282 if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
301 TCR_LAST_BYTE_SENT, 283 TCR_LAST_BYTE_SENT,
302 TCR_LAST_BYTE_SENT, HZ / 64) < 0) 284 TCR_LAST_BYTE_SENT, HZ / 64) < 0)
303 scmd_printk(KERN_ERR, hostdata->connected, 285 scmd_printk(KERN_ERR, hostdata->connected,
@@ -305,25 +287,23 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
305 return 0; 287 return 0;
306 } 288 }
307 289
308 dsprintk(NDEBUG_PSEUDO_DMA, instance, 290 dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
309 "%s: bus error (%d/%d)\n", __func__, transferred, len); 291 "%s: bus error (%d/%d)\n", __func__, transferred, len);
310 NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); 292 NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
311 s = src + transferred; 293 s = src + transferred;
312 n = len - transferred; 294 n = len - transferred;
313 } 295 }
314 296
315 scmd_printk(KERN_ERR, hostdata->connected, 297 scmd_printk(KERN_ERR, hostdata->connected,
316 "%s: phase mismatch or !DRQ\n", __func__); 298 "%s: phase mismatch or !DRQ\n", __func__);
317 NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); 299 NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
318 300
319 return -1; 301 return -1;
320} 302}
321 303
322static int macscsi_dma_xfer_len(struct Scsi_Host *instance, 304static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
323 struct scsi_cmnd *cmd) 305 struct scsi_cmnd *cmd)
324{ 306{
325 struct NCR5380_hostdata *hostdata = shost_priv(instance);
326
327 if (hostdata->flags & FLAG_NO_PSEUDO_DMA || 307 if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
328 cmd->SCp.this_residual < 16) 308 cmd->SCp.this_residual < 16)
329 return 0; 309 return 0;
@@ -331,6 +311,11 @@ static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
331 return cmd->SCp.this_residual; 311 return cmd->SCp.this_residual;
332} 312}
333 313
314static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata)
315{
316 return hostdata->pdma_residual;
317}
318
334#include "NCR5380.c" 319#include "NCR5380.c"
335 320
336#define DRV_MODULE_NAME "mac_scsi" 321#define DRV_MODULE_NAME "mac_scsi"
@@ -356,6 +341,7 @@ static struct scsi_host_template mac_scsi_template = {
356static int __init mac_scsi_probe(struct platform_device *pdev) 341static int __init mac_scsi_probe(struct platform_device *pdev)
357{ 342{
358 struct Scsi_Host *instance; 343 struct Scsi_Host *instance;
344 struct NCR5380_hostdata *hostdata;
359 int error; 345 int error;
360 int host_flags = 0; 346 int host_flags = 0;
361 struct resource *irq, *pio_mem, *pdma_mem = NULL; 347 struct resource *irq, *pio_mem, *pdma_mem = NULL;
@@ -388,17 +374,18 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
388 if (!instance) 374 if (!instance)
389 return -ENOMEM; 375 return -ENOMEM;
390 376
391 instance->base = pio_mem->start;
392 if (irq) 377 if (irq)
393 instance->irq = irq->start; 378 instance->irq = irq->start;
394 else 379 else
395 instance->irq = NO_IRQ; 380 instance->irq = NO_IRQ;
396 381
397 if (pdma_mem && setup_use_pdma) { 382 hostdata = shost_priv(instance);
398 struct NCR5380_hostdata *hostdata = shost_priv(instance); 383 hostdata->base = pio_mem->start;
384 hostdata->io = (void *)pio_mem->start;
399 385
400 hostdata->pdma_base = (unsigned char *)pdma_mem->start; 386 if (pdma_mem && setup_use_pdma)
401 } else 387 hostdata->pdma_io = (void *)pdma_mem->start;
388 else
402 host_flags |= FLAG_NO_PSEUDO_DMA; 389 host_flags |= FLAG_NO_PSEUDO_DMA;
403 390
404 host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; 391 host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3aaea713bf37..fdd519c1dd57 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
35/* 35/*
36 * MegaRAID SAS Driver meta data 36 * MegaRAID SAS Driver meta data
37 */ 37 */
38#define MEGASAS_VERSION "06.811.02.00-rc1" 38#define MEGASAS_VERSION "06.812.07.00-rc1"
39#define MEGASAS_RELDATE "April 12, 2016" 39#define MEGASAS_RELDATE "August 22, 2016"
40 40
41/* 41/*
42 * Device IDs 42 * Device IDs
@@ -1429,6 +1429,8 @@ enum FW_BOOT_CONTEXT {
1429#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 1429#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
1430#define MR_MAX_MSIX_REG_ARRAY 16 1430#define MR_MAX_MSIX_REG_ARRAY 16
1431#define MR_RDPQ_MODE_OFFSET 0X00800000 1431#define MR_RDPQ_MODE_OFFSET 0X00800000
1432#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
1433
1432/* 1434/*
1433* register set for both 1068 and 1078 controllers 1435* register set for both 1068 and 1078 controllers
1434* structure extended for 1078 registers 1436* structure extended for 1078 registers
@@ -2118,7 +2120,6 @@ struct megasas_instance {
2118 u32 ctrl_context_pages; 2120 u32 ctrl_context_pages;
2119 struct megasas_ctrl_info *ctrl_info; 2121 struct megasas_ctrl_info *ctrl_info;
2120 unsigned int msix_vectors; 2122 unsigned int msix_vectors;
2121 struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
2122 struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES]; 2123 struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
2123 u64 map_id; 2124 u64 map_id;
2124 u64 pd_seq_map_id; 2125 u64 pd_seq_map_id;
@@ -2140,6 +2141,7 @@ struct megasas_instance {
2140 u8 is_imr; 2141 u8 is_imr;
2141 u8 is_rdpq; 2142 u8 is_rdpq;
2142 bool dev_handle; 2143 bool dev_handle;
2144 bool fw_sync_cache_support;
2143}; 2145};
2144struct MR_LD_VF_MAP { 2146struct MR_LD_VF_MAP {
2145 u32 size; 2147 u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d8b1fbd4c8aa..6484c382f670 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,11 +1700,8 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1700 goto out_done; 1700 goto out_done;
1701 } 1701 }
1702 1702
1703 /* 1703 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) &&
1704 * FW takes care of flush cache on its own for Virtual Disk. 1704 (!instance->fw_sync_cache_support)) {
1705 * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
1706 */
1707 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
1708 scmd->result = DID_OK << 16; 1705 scmd->result = DID_OK << 16;
1709 goto out_done; 1706 goto out_done;
1710 } 1707 }
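
Virtual-disk cache flushes used to be completed in the driver unconditionally, on the assumption that firmware flushes its own cache; firmware that sets the MR_CAN_HANDLE_SYNC_CACHE_OFFSET bit in scratch pad 2 wants to see the command, so the short-circuit is now gated on fw_sync_cache_support (latched at IOC init, see the fusion changes below). Condensed:

	if (scmd->cmnd[0] == SYNCHRONIZE_CACHE && MEGASAS_IS_LOGICAL(scmd) &&
	    !instance->fw_sync_cache_support) {
		scmd->result = DID_OK << 16;	/* complete locally */
		goto out_done;
	}
	/* otherwise the flush is sent down to firmware */
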
@@ -4840,7 +4837,7 @@ fail_alloc_cmds:
4840} 4837}
4841 4838
4842/* 4839/*
4843 * megasas_setup_irqs_msix - register legacy interrupts. 4840 * megasas_setup_irqs_ioapic - register legacy interrupts.
4844 * @instance: Adapter soft state 4841 * @instance: Adapter soft state
4845 * 4842 *
4846 * Do not enable interrupt, only setup ISRs. 4843 * Do not enable interrupt, only setup ISRs.
@@ -4855,8 +4852,9 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
4855 pdev = instance->pdev; 4852 pdev = instance->pdev;
4856 instance->irq_context[0].instance = instance; 4853 instance->irq_context[0].instance = instance;
4857 instance->irq_context[0].MSIxIndex = 0; 4854 instance->irq_context[0].MSIxIndex = 0;
4858 if (request_irq(pdev->irq, instance->instancet->service_isr, 4855 if (request_irq(pci_irq_vector(pdev, 0),
4859 IRQF_SHARED, "megasas", &instance->irq_context[0])) { 4856 instance->instancet->service_isr, IRQF_SHARED,
4857 "megasas", &instance->irq_context[0])) {
4860 dev_err(&instance->pdev->dev, 4858 dev_err(&instance->pdev->dev,
4861 "Failed to register IRQ from %s %d\n", 4859 "Failed to register IRQ from %s %d\n",
4862 __func__, __LINE__); 4860 __func__, __LINE__);
@@ -4877,28 +4875,23 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
4877static int 4875static int
4878megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 4876megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
4879{ 4877{
4880 int i, j, cpu; 4878 int i, j;
4881 struct pci_dev *pdev; 4879 struct pci_dev *pdev;
4882 4880
4883 pdev = instance->pdev; 4881 pdev = instance->pdev;
4884 4882
4885 /* Try MSI-x */ 4883 /* Try MSI-x */
4886 cpu = cpumask_first(cpu_online_mask);
4887 for (i = 0; i < instance->msix_vectors; i++) { 4884 for (i = 0; i < instance->msix_vectors; i++) {
4888 instance->irq_context[i].instance = instance; 4885 instance->irq_context[i].instance = instance;
4889 instance->irq_context[i].MSIxIndex = i; 4886 instance->irq_context[i].MSIxIndex = i;
4890 if (request_irq(instance->msixentry[i].vector, 4887 if (request_irq(pci_irq_vector(pdev, i),
4891 instance->instancet->service_isr, 0, "megasas", 4888 instance->instancet->service_isr, 0, "megasas",
4892 &instance->irq_context[i])) { 4889 &instance->irq_context[i])) {
4893 dev_err(&instance->pdev->dev, 4890 dev_err(&instance->pdev->dev,
4894 "Failed to register IRQ for vector %d.\n", i); 4891 "Failed to register IRQ for vector %d.\n", i);
4895 for (j = 0; j < i; j++) { 4892 for (j = 0; j < i; j++)
4896 if (smp_affinity_enable) 4893 free_irq(pci_irq_vector(pdev, j),
4897 irq_set_affinity_hint( 4894 &instance->irq_context[j]);
4898 instance->msixentry[j].vector, NULL);
4899 free_irq(instance->msixentry[j].vector,
4900 &instance->irq_context[j]);
4901 }
4902 /* Retry irq register for IO_APIC*/ 4895 /* Retry irq register for IO_APIC*/
4903 instance->msix_vectors = 0; 4896 instance->msix_vectors = 0;
4904 if (is_probe) 4897 if (is_probe)
@@ -4906,14 +4899,6 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
4906 else 4899 else
4907 return -1; 4900 return -1;
4908 } 4901 }
4909 if (smp_affinity_enable) {
4910 if (irq_set_affinity_hint(instance->msixentry[i].vector,
4911 get_cpu_mask(cpu)))
4912 dev_err(&instance->pdev->dev,
4913 "Failed to set affinity hint"
4914 " for cpu %d\n", cpu);
4915 cpu = cpumask_next(cpu, cpu_online_mask);
4916 }
4917 } 4902 }
4918 return 0; 4903 return 0;
4919} 4904}
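
With the PCI core owning the vector table, the msix_entry array and the hand-rolled affinity hints disappear; every request_irq()/free_irq() pair is keyed by pci_irq_vector(pdev, i), and affinity is requested declaratively via PCI_IRQ_AFFINITY at allocation time. The resulting pairing, as a minimal sketch with illustrative names:

	for (i = 0; i < nvec; i++) {
		if (request_irq(pci_irq_vector(pdev, i), service_isr, 0,
				"megasas", &ctx[i]))
			goto unwind;
	}
	return 0;
unwind:
	while (i--)
		free_irq(pci_irq_vector(pdev, i), &ctx[i]);
	return -EINVAL;
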
@@ -4930,14 +4915,12 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
4930 4915
4931 if (instance->msix_vectors) 4916 if (instance->msix_vectors)
4932 for (i = 0; i < instance->msix_vectors; i++) { 4917 for (i = 0; i < instance->msix_vectors; i++) {
4933 if (smp_affinity_enable) 4918 free_irq(pci_irq_vector(instance->pdev, i),
4934 irq_set_affinity_hint(
4935 instance->msixentry[i].vector, NULL);
4936 free_irq(instance->msixentry[i].vector,
4937 &instance->irq_context[i]); 4919 &instance->irq_context[i]);
4938 } 4920 }
4939 else 4921 else
4940 free_irq(instance->pdev->irq, &instance->irq_context[0]); 4922 free_irq(pci_irq_vector(instance->pdev, 0),
4923 &instance->irq_context[0]);
4941} 4924}
4942 4925
4943/** 4926/**
@@ -5095,6 +5078,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
5095 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 5078 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5096 0x4000000) >> 0x1a; 5079 0x4000000) >> 0x1a;
5097 if (msix_enable && !msix_disable) { 5080 if (msix_enable && !msix_disable) {
5081 int irq_flags = PCI_IRQ_MSIX;
5082
5098 scratch_pad_2 = readl 5083 scratch_pad_2 = readl
5099 (&instance->reg_set->outbound_scratch_pad_2); 5084 (&instance->reg_set->outbound_scratch_pad_2);
5100 /* Check max MSI-X vectors */ 5085 /* Check max MSI-X vectors */
@@ -5131,15 +5116,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
5131 /* Don't bother allocating more MSI-X vectors than cpus */ 5116 /* Don't bother allocating more MSI-X vectors than cpus */
5132 instance->msix_vectors = min(instance->msix_vectors, 5117 instance->msix_vectors = min(instance->msix_vectors,
5133 (unsigned int)num_online_cpus()); 5118 (unsigned int)num_online_cpus());
5134 for (i = 0; i < instance->msix_vectors; i++) 5119 if (smp_affinity_enable)
5135 instance->msixentry[i].entry = i; 5120 irq_flags |= PCI_IRQ_AFFINITY;
5136 i = pci_enable_msix_range(instance->pdev, instance->msixentry, 5121 i = pci_alloc_irq_vectors(instance->pdev, 1,
5137 1, instance->msix_vectors); 5122 instance->msix_vectors, irq_flags);
5138 if (i > 0) 5123 if (i > 0)
5139 instance->msix_vectors = i; 5124 instance->msix_vectors = i;
5140 else 5125 else
5141 instance->msix_vectors = 0; 5126 instance->msix_vectors = 0;
5142 } 5127 }
5128 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5129 if (i < 0)
5130 goto fail_setup_irqs;
5143 5131
5144 dev_info(&instance->pdev->dev, 5132 dev_info(&instance->pdev->dev,
5145 "firmware supports msix\t: (%d)", fw_msix_count); 5133 "firmware supports msix\t: (%d)", fw_msix_count);
@@ -5152,11 +5140,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
5152 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 5140 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5153 (unsigned long)instance); 5141 (unsigned long)instance);
5154 5142
5155 if (instance->msix_vectors ?
5156 megasas_setup_irqs_msix(instance, 1) :
5157 megasas_setup_irqs_ioapic(instance))
5158 goto fail_setup_irqs;
5159
5160 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info), 5143 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
5161 GFP_KERNEL); 5144 GFP_KERNEL);
5162 if (instance->ctrl_info == NULL) 5145 if (instance->ctrl_info == NULL)
@@ -5172,6 +5155,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
5172 if (instance->instancet->init_adapter(instance)) 5155 if (instance->instancet->init_adapter(instance))
5173 goto fail_init_adapter; 5156 goto fail_init_adapter;
5174 5157
5158 if (instance->msix_vectors ?
5159 megasas_setup_irqs_msix(instance, 1) :
5160 megasas_setup_irqs_ioapic(instance))
5161 goto fail_init_adapter;
5175 5162
5176 instance->instancet->enable_intr(instance); 5163 instance->instancet->enable_intr(instance);
5177 5164
@@ -5315,7 +5302,7 @@ fail_init_adapter:
5315 megasas_destroy_irqs(instance); 5302 megasas_destroy_irqs(instance);
5316fail_setup_irqs: 5303fail_setup_irqs:
5317 if (instance->msix_vectors) 5304 if (instance->msix_vectors)
5318 pci_disable_msix(instance->pdev); 5305 pci_free_irq_vectors(instance->pdev);
5319 instance->msix_vectors = 0; 5306 instance->msix_vectors = 0;
5320fail_ready_state: 5307fail_ready_state:
5321 kfree(instance->ctrl_info); 5308 kfree(instance->ctrl_info);
@@ -5584,7 +5571,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
5584 /* 5571 /*
5585 * Export parameters required by SCSI mid-layer 5572 * Export parameters required by SCSI mid-layer
5586 */ 5573 */
5587 host->irq = instance->pdev->irq;
5588 host->unique_id = instance->unique_id; 5574 host->unique_id = instance->unique_id;
5589 host->can_queue = instance->max_scsi_cmds; 5575 host->can_queue = instance->max_scsi_cmds;
5590 host->this_id = instance->init_id; 5576 host->this_id = instance->init_id;
@@ -5947,7 +5933,7 @@ fail_io_attach:
5947 else 5933 else
5948 megasas_release_mfi(instance); 5934 megasas_release_mfi(instance);
5949 if (instance->msix_vectors) 5935 if (instance->msix_vectors)
5950 pci_disable_msix(instance->pdev); 5936 pci_free_irq_vectors(instance->pdev);
5951fail_init_mfi: 5937fail_init_mfi:
5952fail_alloc_dma_buf: 5938fail_alloc_dma_buf:
5953 if (instance->evt_detail) 5939 if (instance->evt_detail)
@@ -6105,7 +6091,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6105 megasas_destroy_irqs(instance); 6091 megasas_destroy_irqs(instance);
6106 6092
6107 if (instance->msix_vectors) 6093 if (instance->msix_vectors)
6108 pci_disable_msix(instance->pdev); 6094 pci_free_irq_vectors(instance->pdev);
6109 6095
6110 pci_save_state(pdev); 6096 pci_save_state(pdev);
6111 pci_disable_device(pdev); 6097 pci_disable_device(pdev);
@@ -6125,6 +6111,7 @@ megasas_resume(struct pci_dev *pdev)
6125 int rval; 6111 int rval;
6126 struct Scsi_Host *host; 6112 struct Scsi_Host *host;
6127 struct megasas_instance *instance; 6113 struct megasas_instance *instance;
6114 int irq_flags = PCI_IRQ_LEGACY;
6128 6115
6129 instance = pci_get_drvdata(pdev); 6116 instance = pci_get_drvdata(pdev);
6130 host = instance->host; 6117 host = instance->host;
@@ -6160,9 +6147,15 @@ megasas_resume(struct pci_dev *pdev)
6160 goto fail_ready_state; 6147 goto fail_ready_state;
6161 6148
6162 /* Now re-enable MSI-X */ 6149 /* Now re-enable MSI-X */
6163 if (instance->msix_vectors && 6150 if (instance->msix_vectors) {
6164 pci_enable_msix_exact(instance->pdev, instance->msixentry, 6151 irq_flags = PCI_IRQ_MSIX;
6165 instance->msix_vectors)) 6152 if (smp_affinity_enable)
6153 irq_flags |= PCI_IRQ_AFFINITY;
6154 }
6155 rval = pci_alloc_irq_vectors(instance->pdev, 1,
6156 instance->msix_vectors ?
6157 instance->msix_vectors : 1, irq_flags);
6158 if (rval < 0)
6166 goto fail_reenable_msix; 6159 goto fail_reenable_msix;
6167 6160
6168 if (instance->ctrl_context) { 6161 if (instance->ctrl_context) {
@@ -6245,6 +6238,34 @@ fail_reenable_msix:
6245#define megasas_resume NULL 6238#define megasas_resume NULL
6246#endif 6239#endif
6247 6240
6241static inline int
6242megasas_wait_for_adapter_operational(struct megasas_instance *instance)
6243{
6244 int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
6245 int i;
6246
6247 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6248 return 1;
6249
6250 for (i = 0; i < wait_time; i++) {
6251 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
6252 break;
6253
6254 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
6255 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
6256
6257 msleep(1000);
6258 }
6259
6260 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6261 dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
6262 __func__);
6263 return 1;
6264 }
6265
6266 return 0;
6267}
6268
6248/** 6269/**
6249 * megasas_detach_one - PCI hot"un"plug entry point 6270 * megasas_detach_one - PCI hot"un"plug entry point
6250 * @pdev: PCI device structure 6271 * @pdev: PCI device structure
@@ -6269,9 +6290,14 @@ static void megasas_detach_one(struct pci_dev *pdev)
6269 if (instance->fw_crash_state != UNAVAILABLE) 6290 if (instance->fw_crash_state != UNAVAILABLE)
6270 megasas_free_host_crash_buffer(instance); 6291 megasas_free_host_crash_buffer(instance);
6271 scsi_remove_host(instance->host); 6292 scsi_remove_host(instance->host);
6293
6294 if (megasas_wait_for_adapter_operational(instance))
6295 goto skip_firing_dcmds;
6296
6272 megasas_flush_cache(instance); 6297 megasas_flush_cache(instance);
6273 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 6298 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6274 6299
6300skip_firing_dcmds:
6275 /* cancel the delayed work if this work still in queue*/ 6301 /* cancel the delayed work if this work still in queue*/
6276 if (instance->ev != NULL) { 6302 if (instance->ev != NULL) {
6277 struct megasas_aen_event *ev = instance->ev; 6303 struct megasas_aen_event *ev = instance->ev;
@@ -6302,7 +6328,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
6302 megasas_destroy_irqs(instance); 6328 megasas_destroy_irqs(instance);
6303 6329
6304 if (instance->msix_vectors) 6330 if (instance->msix_vectors)
6305 pci_disable_msix(instance->pdev); 6331 pci_free_irq_vectors(instance->pdev);
6306 6332
6307 if (instance->ctrl_context) { 6333 if (instance->ctrl_context) {
6308 megasas_release_fusion(instance); 6334 megasas_release_fusion(instance);
@@ -6385,13 +6411,19 @@ static void megasas_shutdown(struct pci_dev *pdev)
6385 struct megasas_instance *instance = pci_get_drvdata(pdev); 6411 struct megasas_instance *instance = pci_get_drvdata(pdev);
6386 6412
6387 instance->unload = 1; 6413 instance->unload = 1;
6414
6415 if (megasas_wait_for_adapter_operational(instance))
6416 goto skip_firing_dcmds;
6417
6388 megasas_flush_cache(instance); 6418 megasas_flush_cache(instance);
6389 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 6419 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6420
6421skip_firing_dcmds:
6390 instance->instancet->disable_intr(instance); 6422 instance->instancet->disable_intr(instance);
6391 megasas_destroy_irqs(instance); 6423 megasas_destroy_irqs(instance);
6392 6424
6393 if (instance->msix_vectors) 6425 if (instance->msix_vectors)
6394 pci_disable_msix(instance->pdev); 6426 pci_free_irq_vectors(instance->pdev);
6395} 6427}
6396 6428
6397/** 6429/**
@@ -6752,8 +6784,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
6752 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 6784 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6753 spin_unlock_irqrestore(&instance->hba_lock, flags); 6785 spin_unlock_irqrestore(&instance->hba_lock, flags);
6754 6786
6755 dev_err(&instance->pdev->dev, "timed out while" 6787 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
6756 "waiting for HBA to recover\n");
6757 error = -ENODEV; 6788 error = -ENODEV;
6758 goto out_up; 6789 goto out_up;
6759 } 6790 }
@@ -6821,8 +6852,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
6821 spin_lock_irqsave(&instance->hba_lock, flags); 6852 spin_lock_irqsave(&instance->hba_lock, flags);
6822 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 6853 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6823 spin_unlock_irqrestore(&instance->hba_lock, flags); 6854 spin_unlock_irqrestore(&instance->hba_lock, flags);
6824 dev_err(&instance->pdev->dev, "timed out while waiting" 6855 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
6825 "for HBA to recover\n");
6826 return -ENODEV; 6856 return -ENODEV;
6827 } 6857 }
6828 spin_unlock_irqrestore(&instance->hba_lock, flags); 6858 spin_unlock_irqrestore(&instance->hba_lock, flags);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e413113c86ac..f237d0003df3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -782,7 +782,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
782 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) 782 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
783 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; 783 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
784 else if (raid->level == 1) { 784 else if (raid->level == 1) {
785 pd = MR_ArPdGet(arRef, physArm + 1, map); 785 physArm = physArm + 1;
786 pd = MR_ArPdGet(arRef, physArm, map);
786 if (pd != MR_PD_INVALID) 787 if (pd != MR_PD_INVALID)
787 *pDevHandle = MR_PdDevHandleGet(pd, map); 788 *pDevHandle = MR_PdDevHandleGet(pd, map);
788 } 789 }
@@ -879,7 +880,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
879 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; 880 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
880 else if (raid->level == 1) { 881 else if (raid->level == 1) {
881 /* Get alternate Pd. */ 882 /* Get alternate Pd. */
882 pd = MR_ArPdGet(arRef, physArm + 1, map); 883 physArm = physArm + 1;
884 pd = MR_ArPdGet(arRef, physArm, map);
883 if (pd != MR_PD_INVALID) 885 if (pd != MR_PD_INVALID)
884 /* Get dev handle from Pd */ 886 /* Get dev handle from Pd */
885 *pDevHandle = MR_PdDevHandleGet(pd, map); 887 *pDevHandle = MR_PdDevHandleGet(pd, map);
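
The RAID1 fast-path fix is subtle: the alternate (mirror) physical drive was fetched with physArm + 1, but physArm itself was left at the old value, so the span/arm later written into the RAID context did not match the device handle actually selected. Bumping the variable first keeps the two consistent:

	} else if (raid->level == 1) {
		physArm = physArm + 1;	/* switch to the mirror arm */
		pd = MR_ArPdGet(arRef, physArm, map);
		if (pd != MR_PD_INVALID)	/* handle for that same arm */
			*pDevHandle = MR_PdDevHandleGet(pd, map);
	}
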
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 52d8bbf7feb5..24778ba4b6e8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -748,6 +748,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
748 goto fail_fw_init; 748 goto fail_fw_init;
749 } 749 }
750 750
751 instance->fw_sync_cache_support = (scratch_pad_2 &
752 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
753 dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
754 instance->fw_sync_cache_support ? "Yes" : "No");
755
751 IOCInitMessage = 756 IOCInitMessage =
752 dma_alloc_coherent(&instance->pdev->dev, 757 dma_alloc_coherent(&instance->pdev->dev,
753 sizeof(struct MPI2_IOC_INIT_REQUEST), 758 sizeof(struct MPI2_IOC_INIT_REQUEST),
@@ -2000,6 +2005,8 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
2000 io_request->DevHandle = pd_sync->seq[pd_index].devHandle; 2005 io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
2001 pRAID_Context->regLockFlags |= 2006 pRAID_Context->regLockFlags |=
2002 (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA); 2007 (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
2008 pRAID_Context->Type = MPI2_TYPE_CUDA;
2009 pRAID_Context->nseg = 0x1;
2003 } else if (fusion->fast_path_io) { 2010 } else if (fusion->fast_path_io) {
2004 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 2011 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
2005 pRAID_Context->configSeqNum = 0; 2012 pRAID_Context->configSeqNum = 0;
@@ -2035,12 +2042,10 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
2035 pRAID_Context->timeoutValue = 2042 pRAID_Context->timeoutValue =
2036 cpu_to_le16((os_timeout_value > timeout_limit) ? 2043 cpu_to_le16((os_timeout_value > timeout_limit) ?
2037 timeout_limit : os_timeout_value); 2044 timeout_limit : os_timeout_value);
2038 if (fusion->adapter_type == INVADER_SERIES) { 2045 if (fusion->adapter_type == INVADER_SERIES)
2039 pRAID_Context->Type = MPI2_TYPE_CUDA;
2040 pRAID_Context->nseg = 0x1;
2041 io_request->IoFlags |= 2046 io_request->IoFlags |=
2042 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2047 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2043 } 2048
2044 cmd->request_desc->SCSIIO.RequestFlags = 2049 cmd->request_desc->SCSIIO.RequestFlags =
2045 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 2050 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
2046 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2051 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -2463,12 +2468,15 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
2463 /* Start collecting crash, if DMA bit is done */ 2468 /* Start collecting crash, if DMA bit is done */
2464 if ((fw_state == MFI_STATE_FAULT) && dma_state) 2469 if ((fw_state == MFI_STATE_FAULT) && dma_state)
2465 schedule_work(&instance->crash_init); 2470 schedule_work(&instance->crash_init);
2466 else if (fw_state == MFI_STATE_FAULT) 2471 else if (fw_state == MFI_STATE_FAULT) {
2467 schedule_work(&instance->work_init); 2472 if (instance->unload == 0)
2473 schedule_work(&instance->work_init);
2474 }
2468 } else if (fw_state == MFI_STATE_FAULT) { 2475 } else if (fw_state == MFI_STATE_FAULT) {
2469 dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt" 2476 dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
2470 "for scsi%d\n", instance->host->host_no); 2477 "for scsi%d\n", instance->host->host_no);
2471 schedule_work(&instance->work_init); 2478 if (instance->unload == 0)
2479 schedule_work(&instance->work_init);
2472 } 2480 }
2473 } 2481 }
2474 2482
@@ -2823,6 +2831,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2823 dev_err(&instance->pdev->dev, "pending commands remain after waiting, " 2831 dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
2824 "will reset adapter scsi%d.\n", 2832 "will reset adapter scsi%d.\n",
2825 instance->host->host_no); 2833 instance->host->host_no);
2834 *convert = 1;
2826 retval = 1; 2835 retval = 1;
2827 } 2836 }
2828out: 2837out:
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 95356a82ee99..fa61baf7c74d 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -478,6 +478,13 @@ typedef struct _MPI2_CONFIG_REPLY {
478#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2) 478#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2)
479#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3) 479#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3)
480 480
481#define MPI26_MFGPAGE_DEVID_SAS3516 (0x00AA)
482#define MPI26_MFGPAGE_DEVID_SAS3516_1 (0x00AB)
483#define MPI26_MFGPAGE_DEVID_SAS3416 (0x00AC)
484#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD)
485#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE)
486#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF)
487
481/*Manufacturing Page 0 */ 488/*Manufacturing Page 0 */
482 489
483typedef struct _MPI2_CONFIG_PAGE_MAN_0 { 490typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index a1a5ceb42ce6..f00ef88a378a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -849,7 +849,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
849 ack_request->EventContext = mpi_reply->EventContext; 849 ack_request->EventContext = mpi_reply->EventContext;
850 ack_request->VF_ID = 0; /* TODO */ 850 ack_request->VF_ID = 0; /* TODO */
851 ack_request->VP_ID = 0; 851 ack_request->VP_ID = 0;
852 mpt3sas_base_put_smid_default(ioc, smid); 852 ioc->put_smid_default(ioc, smid);
853 853
854 out: 854 out:
855 855
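
The mpt3sas changes in this pull introduce per-adapter dispatch: the exported mpt3sas_base_put_smid_* helpers become static and callers go through function pointers on the adapter object, so SAS3.5-generation parts can post the 32-bit Atomic Request Descriptor (added further down) while older parts keep the 64-bit descriptor write. A sketch of how the dispatch is wired up at init time, assuming the capability flag used by this series (atomic_desc_capable):

	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default     = &_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io     = &_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path   = &_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
	} else {
		ioc->put_smid_default     = &_base_put_smid_default;
		ioc->put_smid_scsi_io     = &_base_put_smid_scsi_io;
		ioc->put_smid_fast_path   = &_base_put_smid_fast_path;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
	}
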
@@ -1078,7 +1078,7 @@ _base_interrupt(int irq, void *bus_id)
1078 * new reply host index value in ReplyPostIndex Field and msix_index 1078 * new reply host index value in ReplyPostIndex Field and msix_index
1079 * value in MSIxIndex field. 1079 * value in MSIxIndex field.
1080 */ 1080 */
1081 if (ioc->msix96_vector) 1081 if (ioc->combined_reply_queue)
1082 writel(reply_q->reply_post_host_index | ((msix_index & 7) << 1082 writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
1083 MPI2_RPHI_MSIX_INDEX_SHIFT), 1083 MPI2_RPHI_MSIX_INDEX_SHIFT),
1084 ioc->replyPostRegisterIndex[msix_index/8]); 1084 ioc->replyPostRegisterIndex[msix_index/8]);
@@ -1959,7 +1959,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1959{ 1959{
1960 struct msix_entry *entries, *a; 1960 struct msix_entry *entries, *a;
1961 int r; 1961 int r;
1962 int i; 1962 int i, local_max_msix_vectors;
1963 u8 try_msix = 0; 1963 u8 try_msix = 0;
1964 1964
1965 if (msix_disable == -1 || msix_disable == 0) 1965 if (msix_disable == -1 || msix_disable == 0)
@@ -1979,13 +1979,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1979 ioc->cpu_count, max_msix_vectors); 1979 ioc->cpu_count, max_msix_vectors);
1980 1980
1981 if (!ioc->rdpq_array_enable && max_msix_vectors == -1) 1981 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
1982 max_msix_vectors = 8; 1982 local_max_msix_vectors = 8;
1983 else
1984 local_max_msix_vectors = max_msix_vectors;
1983 1985
1984 if (max_msix_vectors > 0) { 1986 if (local_max_msix_vectors > 0) {
1985 ioc->reply_queue_count = min_t(int, max_msix_vectors, 1987 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
1986 ioc->reply_queue_count); 1988 ioc->reply_queue_count);
1987 ioc->msix_vector_count = ioc->reply_queue_count; 1989 ioc->msix_vector_count = ioc->reply_queue_count;
1988 } else if (max_msix_vectors == 0) 1990 } else if (local_max_msix_vectors == 0)
1989 goto try_ioapic; 1991 goto try_ioapic;
1990 1992
1991 if (ioc->msix_vector_count < ioc->cpu_count) 1993 if (ioc->msix_vector_count < ioc->cpu_count)
@@ -2050,7 +2052,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2050 _base_free_irq(ioc); 2052 _base_free_irq(ioc);
2051 _base_disable_msix(ioc); 2053 _base_disable_msix(ioc);
2052 2054
2053 if (ioc->msix96_vector) { 2055 if (ioc->combined_reply_queue) {
2054 kfree(ioc->replyPostRegisterIndex); 2056 kfree(ioc->replyPostRegisterIndex);
2055 ioc->replyPostRegisterIndex = NULL; 2057 ioc->replyPostRegisterIndex = NULL;
2056 } 2058 }
@@ -2160,7 +2162,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2160 /* Use the Combined reply queue feature only for SAS3 C0 & higher 2162 /* Use the Combined reply queue feature only for SAS3 C0 & higher
2161 * revision HBAs and also only when reply queue count is greater than 8 2163 * revision HBAs and also only when reply queue count is greater than 8
2162 */ 2164 */
2163 if (ioc->msix96_vector && ioc->reply_queue_count > 8) { 2165 if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
2164 /* Determine the Supplemental Reply Post Host Index Registers 2166 /* Determine the Supplemental Reply Post Host Index Registers
2165 * Addresse. Supplemental Reply Post Host Index Registers 2167 * Addresse. Supplemental Reply Post Host Index Registers
2166 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and 2168 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
@@ -2168,7 +2170,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2168 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one. 2170 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
2169 */ 2171 */
2170 ioc->replyPostRegisterIndex = kcalloc( 2172 ioc->replyPostRegisterIndex = kcalloc(
2171 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT, 2173 ioc->combined_reply_index_count,
2172 sizeof(resource_size_t *), GFP_KERNEL); 2174 sizeof(resource_size_t *), GFP_KERNEL);
2173 if (!ioc->replyPostRegisterIndex) { 2175 if (!ioc->replyPostRegisterIndex) {
2174 dfailprintk(ioc, printk(MPT3SAS_FMT 2176 dfailprintk(ioc, printk(MPT3SAS_FMT
@@ -2178,14 +2180,14 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2178 goto out_fail; 2180 goto out_fail;
2179 } 2181 }
2180 2182
2181 for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) { 2183 for (i = 0; i < ioc->combined_reply_index_count; i++) {
2182 ioc->replyPostRegisterIndex[i] = (resource_size_t *) 2184 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
2183 ((u8 *)&ioc->chip->Doorbell + 2185 ((u8 *)&ioc->chip->Doorbell +
2184 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + 2186 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2185 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); 2187 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
2186 } 2188 }
2187 } else 2189 } else
2188 ioc->msix96_vector = 0; 2190 ioc->combined_reply_queue = 0;
2189 2191
2190 if (ioc->is_warpdrive) { 2192 if (ioc->is_warpdrive) {
2191 ioc->reply_post_host_index[0] = (resource_size_t __iomem *) 2193 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
@@ -2462,15 +2464,15 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2462#endif 2464#endif
2463 2465
2464/** 2466/**
2465 * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware 2467 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
2466 * @ioc: per adapter object 2468 * @ioc: per adapter object
2467 * @smid: system request message index 2469 * @smid: system request message index
2468 * @handle: device handle 2470 * @handle: device handle
2469 * 2471 *
2470 * Return nothing. 2472 * Return nothing.
2471 */ 2473 */
2472void 2474static void
2473mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) 2475_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
2474{ 2476{
2475 Mpi2RequestDescriptorUnion_t descriptor; 2477 Mpi2RequestDescriptorUnion_t descriptor;
2476 u64 *request = (u64 *)&descriptor; 2478 u64 *request = (u64 *)&descriptor;
@@ -2486,15 +2488,15 @@ mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
2486} 2488}
2487 2489
2488/** 2490/**
2489 * mpt3sas_base_put_smid_fast_path - send fast path request to firmware 2491 * _base_put_smid_fast_path - send fast path request to firmware
2490 * @ioc: per adapter object 2492 * @ioc: per adapter object
2491 * @smid: system request message index 2493 * @smid: system request message index
2492 * @handle: device handle 2494 * @handle: device handle
2493 * 2495 *
2494 * Return nothing. 2496 * Return nothing.
2495 */ 2497 */
2496void 2498static void
2497mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, 2499_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2498 u16 handle) 2500 u16 handle)
2499{ 2501{
2500 Mpi2RequestDescriptorUnion_t descriptor; 2502 Mpi2RequestDescriptorUnion_t descriptor;
@@ -2511,14 +2513,14 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2511} 2513}
2512 2514
2513/** 2515/**
2514 * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware 2516 * _base_put_smid_hi_priority - send Task Management request to firmware
2515 * @ioc: per adapter object 2517 * @ioc: per adapter object
2516 * @smid: system request message index 2518 * @smid: system request message index
2517 * @msix_task: msix_task will be same as msix of IO incase of task abort else 0. 2519 * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
2518 * Return nothing. 2520 * Return nothing.
2519 */ 2521 */
2520void 2522static void
2521mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, 2523_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2522 u16 msix_task) 2524 u16 msix_task)
2523{ 2525{
2524 Mpi2RequestDescriptorUnion_t descriptor; 2526 Mpi2RequestDescriptorUnion_t descriptor;
@@ -2535,14 +2537,14 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2535} 2537}
2536 2538
2537/** 2539/**
2538 * mpt3sas_base_put_smid_default - Default, primarily used for config pages 2540 * _base_put_smid_default - Default, primarily used for config pages
2539 * @ioc: per adapter object 2541 * @ioc: per adapter object
2540 * @smid: system request message index 2542 * @smid: system request message index
2541 * 2543 *
2542 * Return nothing. 2544 * Return nothing.
2543 */ 2545 */
2544void 2546static void
2545mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2547_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2546{ 2548{
2547 Mpi2RequestDescriptorUnion_t descriptor; 2549 Mpi2RequestDescriptorUnion_t descriptor;
2548 u64 *request = (u64 *)&descriptor; 2550 u64 *request = (u64 *)&descriptor;
@@ -2557,6 +2559,95 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2557} 2559}
2558 2560
2559/** 2561/**
2562 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
2563 * Atomic Request Descriptor
2564 * @ioc: per adapter object
2565 * @smid: system request message index
2566 * @handle: device handle, unused in this function, for function type match
2567 *
2568 * Return nothing.
2569 */
2570static void
2571_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2572 u16 handle)
2573{
2574 Mpi26AtomicRequestDescriptor_t descriptor;
2575 u32 *request = (u32 *)&descriptor;
2576
2577 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2578 descriptor.MSIxIndex = _base_get_msix_index(ioc);
2579 descriptor.SMID = cpu_to_le16(smid);
2580
2581 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
2582}
2583
2584/**
2585 * _base_put_smid_fast_path_atomic - send fast path request to firmware
2586 * using Atomic Request Descriptor
2587 * @ioc: per adapter object
2588 * @smid: system request message index
2589 * @handle: device handle, unused in this function, for function type match
2590 * Return nothing.
2591 */
2592static void
2593_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2594 u16 handle)
2595{
2596 Mpi26AtomicRequestDescriptor_t descriptor;
2597 u32 *request = (u32 *)&descriptor;
2598
2599 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2600 descriptor.MSIxIndex = _base_get_msix_index(ioc);
2601 descriptor.SMID = cpu_to_le16(smid);
2602
2603 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
2604}
2605
2606/**
2607 * _base_put_smid_hi_priority_atomic - send Task Management request to
2608 * firmware using Atomic Request Descriptor
2609 * @ioc: per adapter object
2610 * @smid: system request message index
2611 * @msix_task: msix_task will be same as msix of IO in case of task abort, else 0
2612 *
2613 * Return nothing.
2614 */
2615static void
2616_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2617 u16 msix_task)
2618{
2619 Mpi26AtomicRequestDescriptor_t descriptor;
2620 u32 *request = (u32 *)&descriptor;
2621
2622 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2623 descriptor.MSIxIndex = msix_task;
2624 descriptor.SMID = cpu_to_le16(smid);
2625
2626 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
2627}
2628
2629/**
2630 * _base_put_smid_default_atomic - Default, primarily used for config pages,
2631 * using Atomic Request Descriptor
2632 * @ioc: per adapter object
2633 * @smid: system request message index
2634 *
2635 * Return nothing.
2636 */
2637static void
2638_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2639{
2640 Mpi26AtomicRequestDescriptor_t descriptor;
2641 u32 *request = (u32 *)&descriptor;
2642
2643 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2644 descriptor.MSIxIndex = _base_get_msix_index(ioc);
2645 descriptor.SMID = cpu_to_le16(smid);
2646
2647 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
2648}
2649
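
The four _atomic helpers above share one shape: fill a 4-byte descriptor (request flags, MSI-x index, SMID) and post it with a single 32-bit writel to AtomicRequestDescriptorPost, which is why none of them needs the _base_writeq spinlock that guards the 64-bit descriptor path. A minimal userspace sketch of that packing, assuming the field layout shown in the hunk (post_reg is only a stand-in for the hardware register):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the shape of Mpi26AtomicRequestDescriptor_t above: one byte
 * of request flags, one byte of MSI-x index, a 16-bit SMID. The whole
 * descriptor is 32 bits wide. */
struct atomic_desc {
	uint8_t  request_flags;
	uint8_t  msix_index;
	uint16_t smid;		/* little-endian on the wire */
};

static uint32_t post_reg;	/* stand-in for AtomicRequestDescriptorPost */

static void put_smid_atomic(uint8_t flags, uint8_t msix, uint16_t smid)
{
	struct atomic_desc d = { flags, msix, smid };
	uint32_t raw;

	/* The descriptor fits in one register, so a single 32-bit store
	 * posts it -- no writeq emulation or lock needed. */
	memcpy(&raw, &d, sizeof(raw));
	post_reg = raw;
}

int main(void)
{
	put_smid_atomic(0x00 /* SCSI_IO */, 3, 42);
	printf("posted descriptor 0x%08x\n", post_reg);
	return 0;
}
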
2650/**
2560 * _base_display_OEMs_branding - Display branding string 2651 * _base_display_OEMs_branding - Display branding string
2561 * @ioc: per adapter object 2652 * @ioc: per adapter object
2562 * 2653 *
@@ -4070,7 +4161,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
4070 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) 4161 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
4071 ioc->ioc_link_reset_in_progress = 1; 4162 ioc->ioc_link_reset_in_progress = 1;
4072 init_completion(&ioc->base_cmds.done); 4163 init_completion(&ioc->base_cmds.done);
4073 mpt3sas_base_put_smid_default(ioc, smid); 4164 ioc->put_smid_default(ioc, smid);
4074 wait_for_completion_timeout(&ioc->base_cmds.done, 4165 wait_for_completion_timeout(&ioc->base_cmds.done,
4075 msecs_to_jiffies(10000)); 4166 msecs_to_jiffies(10000));
4076 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 4167 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -4170,7 +4261,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
4170 ioc->base_cmds.smid = smid; 4261 ioc->base_cmds.smid = smid;
4171 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); 4262 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
4172 init_completion(&ioc->base_cmds.done); 4263 init_completion(&ioc->base_cmds.done);
4173 mpt3sas_base_put_smid_default(ioc, smid); 4264 ioc->put_smid_default(ioc, smid);
4174 wait_for_completion_timeout(&ioc->base_cmds.done, 4265 wait_for_completion_timeout(&ioc->base_cmds.done,
4175 msecs_to_jiffies(10000)); 4266 msecs_to_jiffies(10000));
4176 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 4267 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -4355,6 +4446,8 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
4355 if ((facts->IOCCapabilities & 4446 if ((facts->IOCCapabilities &
4356 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE)) 4447 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
4357 ioc->rdpq_array_capable = 1; 4448 ioc->rdpq_array_capable = 1;
4449 if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
4450 ioc->atomic_desc_capable = 1;
4358 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); 4451 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
4359 facts->IOCRequestFrameSize = 4452 facts->IOCRequestFrameSize =
4360 le16_to_cpu(mpi_reply.IOCRequestFrameSize); 4453 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -4582,7 +4675,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
4582 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; 4675 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4583 4676
4584 init_completion(&ioc->port_enable_cmds.done); 4677 init_completion(&ioc->port_enable_cmds.done);
4585 mpt3sas_base_put_smid_default(ioc, smid); 4678 ioc->put_smid_default(ioc, smid);
4586 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ); 4679 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
4587 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { 4680 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
4588 pr_err(MPT3SAS_FMT "%s: timeout\n", 4681 pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -4645,7 +4738,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
4645 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); 4738 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4646 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; 4739 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4647 4740
4648 mpt3sas_base_put_smid_default(ioc, smid); 4741 ioc->put_smid_default(ioc, smid);
4649 return 0; 4742 return 0;
4650} 4743}
4651 4744
@@ -4764,7 +4857,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
4764 mpi_request->EventMasks[i] = 4857 mpi_request->EventMasks[i] =
4765 cpu_to_le32(ioc->event_masks[i]); 4858 cpu_to_le32(ioc->event_masks[i]);
4766 init_completion(&ioc->base_cmds.done); 4859 init_completion(&ioc->base_cmds.done);
4767 mpt3sas_base_put_smid_default(ioc, smid); 4860 ioc->put_smid_default(ioc, smid);
4768 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); 4861 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
4769 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 4862 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4770 pr_err(MPT3SAS_FMT "%s: timeout\n", 4863 pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -5138,7 +5231,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
5138 5231
5139 /* initialize reply post host index */ 5232 /* initialize reply post host index */
5140 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 5233 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5141 if (ioc->msix96_vector) 5234 if (ioc->combined_reply_queue)
5142 writel((reply_q->msix_index & 7)<< 5235 writel((reply_q->msix_index & 7)<<
5143 MPI2_RPHI_MSIX_INDEX_SHIFT, 5236 MPI2_RPHI_MSIX_INDEX_SHIFT,
5144 ioc->replyPostRegisterIndex[reply_q->msix_index/8]); 5237 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
@@ -5280,9 +5373,23 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5280 ioc->build_sg = &_base_build_sg_ieee; 5373 ioc->build_sg = &_base_build_sg_ieee;
5281 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; 5374 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
5282 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); 5375 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
5376
5283 break; 5377 break;
5284 } 5378 }
5285 5379
5380 if (ioc->atomic_desc_capable) {
5381 ioc->put_smid_default = &_base_put_smid_default_atomic;
5382 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
5383 ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
5384 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
5385 } else {
5386 ioc->put_smid_default = &_base_put_smid_default;
5387 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
5388 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
5389 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
5390 }
5391
5392
5286 /* 5393 /*
5287 * These function pointers for other requests that don't 5394 * These function pointers for other requests that don't
5288 * require IEEE scatter gather elements. 5395 * require IEEE scatter gather elements.
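
The attach-time wiring above is a capability switch: test atomic_desc_capable once, then route every submission through ioc->put_smid_* so the I/O hot path never re-checks the flag. A small sketch of the pattern, with hypothetical names:

#include <stdio.h>

struct adapter;
typedef void (*put_smid_fn)(struct adapter *a, unsigned short smid);

struct adapter {
	int atomic_desc_capable;
	put_smid_fn put_smid_default;
};

static void put_smid_plain(struct adapter *a, unsigned short smid)
{
	(void)a;
	printf("posting 64-bit descriptor, smid %u\n", smid);
}

static void put_smid_atomic(struct adapter *a, unsigned short smid)
{
	(void)a;
	printf("posting 32-bit atomic descriptor, smid %u\n", smid);
}

/* Decide once at attach time; callers never branch on the capability. */
static void adapter_attach(struct adapter *a)
{
	a->put_smid_default = a->atomic_desc_capable ?
			      put_smid_atomic : put_smid_plain;
}

int main(void)
{
	struct adapter a = { .atomic_desc_capable = 1 };

	adapter_attach(&a);
	a.put_smid_default(&a, 42);
	return 0;
}
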
@@ -5332,6 +5439,21 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5332 goto out_free_resources; 5439 goto out_free_resources;
5333 } 5440 }
5334 5441
5442 /* allocate memory for pending OS device add list */
5443 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
5444 if (ioc->facts.MaxDevHandle % 8)
5445 ioc->pend_os_device_add_sz++;
5446 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
5447 GFP_KERNEL);
5448 if (!ioc->pend_os_device_add)
5449 goto out_free_resources;
5450
5451 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
5452 ioc->device_remove_in_progress =
5453 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
5454 if (!ioc->device_remove_in_progress)
5455 goto out_free_resources;
5456
5335 ioc->fwfault_debug = mpt3sas_fwfault_debug; 5457 ioc->fwfault_debug = mpt3sas_fwfault_debug;
5336 5458
5337 /* base internal command bits */ 5459 /* base internal command bits */
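
The sizing arithmetic above (MaxDevHandle / 8, plus one byte if there is a remainder) is a ceiling division that allocates one bit per possible device handle. A userspace equivalent of the allocation, with calloc standing in for kzalloc:

#include <stdio.h>
#include <stdlib.h>

/* One bit per device handle, rounded up to whole bytes. */
static void *alloc_handle_bitmap(unsigned int max_handle, size_t *sz)
{
	*sz = max_handle / 8;
	if (max_handle % 8)
		(*sz)++;
	return calloc(1, *sz);		/* zero-filled, like kzalloc */
}

int main(void)
{
	size_t sz;
	void *map = alloc_handle_bitmap(1000, &sz);

	printf("1000 handles -> %zu bytes\n", sz);	/* prints 125 */
	free(map);
	return 0;
}
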
@@ -5414,6 +5536,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5414 kfree(ioc->reply_post_host_index); 5536 kfree(ioc->reply_post_host_index);
5415 kfree(ioc->pd_handles); 5537 kfree(ioc->pd_handles);
5416 kfree(ioc->blocking_handles); 5538 kfree(ioc->blocking_handles);
5539 kfree(ioc->device_remove_in_progress);
5540 kfree(ioc->pend_os_device_add);
5417 kfree(ioc->tm_cmds.reply); 5541 kfree(ioc->tm_cmds.reply);
5418 kfree(ioc->transport_cmds.reply); 5542 kfree(ioc->transport_cmds.reply);
5419 kfree(ioc->scsih_cmds.reply); 5543 kfree(ioc->scsih_cmds.reply);
@@ -5455,6 +5579,8 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
5455 kfree(ioc->reply_post_host_index); 5579 kfree(ioc->reply_post_host_index);
5456 kfree(ioc->pd_handles); 5580 kfree(ioc->pd_handles);
5457 kfree(ioc->blocking_handles); 5581 kfree(ioc->blocking_handles);
5582 kfree(ioc->device_remove_in_progress);
5583 kfree(ioc->pend_os_device_add);
5458 kfree(ioc->pfacts); 5584 kfree(ioc->pfacts);
5459 kfree(ioc->ctl_cmds.reply); 5585 kfree(ioc->ctl_cmds.reply);
5460 kfree(ioc->ctl_cmds.sense); 5586 kfree(ioc->ctl_cmds.sense);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 3e71bc1b4a80..8de0eda8cd00 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,9 +73,9 @@
73#define MPT3SAS_DRIVER_NAME "mpt3sas" 73#define MPT3SAS_DRIVER_NAME "mpt3sas"
74#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 74#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
75#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 75#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
76#define MPT3SAS_DRIVER_VERSION "13.100.00.00" 76#define MPT3SAS_DRIVER_VERSION "14.101.00.00"
77#define MPT3SAS_MAJOR_VERSION 13 77#define MPT3SAS_MAJOR_VERSION 14
78#define MPT3SAS_MINOR_VERSION 100 78#define MPT3SAS_MINOR_VERSION 101
79#define MPT3SAS_BUILD_VERSION 0 79#define MPT3SAS_BUILD_VERSION 0
80#define MPT3SAS_RELEASE_VERSION 00 80#define MPT3SAS_RELEASE_VERSION 00
81 81
@@ -300,8 +300,9 @@
300 * There are twelve Supplemental Reply Post Host Index Registers 300 * There are twelve Supplemental Reply Post Host Index Registers
301 * and each register is at offset 0x10 bytes from the previous one. 301 * and each register is at offset 0x10 bytes from the previous one.
302 */ 302 */
303#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12 303#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
304#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) 304#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
305#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
305 306
306/* OEM Identifiers */ 307/* OEM Identifiers */
307#define MFG10_OEM_ID_INVALID (0x00000000) 308#define MFG10_OEM_ID_INVALID (0x00000000)
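
The split into _G3 and _G35 counts pairs with the replyPostRegisterIndex[] setup in the earlier hunk: supplemental register i lives at a fixed offset from the Doorbell plus i * 0x10, and Gen3.5 parts simply expose 16 of them instead of 12. A small sketch of the address arithmetic; the first define is only an assumed placeholder for MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, while the stride and count come from the header hunk above:

#include <stdint.h>
#include <stdio.h>

#define SUP_REPLY_POST_HOST_INDEX_OFFSET	0x6c	/* assumed value */
#define SUP_REPLY_POST_HOST_INDEX_REG_OFFSET	0x10
#define SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35	16

static uintptr_t sup_reply_post_reg(uintptr_t doorbell, unsigned int i)
{
	return doorbell + SUP_REPLY_POST_HOST_INDEX_OFFSET +
	       (uintptr_t)i * SUP_REPLY_POST_HOST_INDEX_REG_OFFSET;
}

int main(void)
{
	uintptr_t doorbell = 0x1000;	/* hypothetical mapped Doorbell address */
	unsigned int i;

	for (i = 0; i < SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; i++)
		printf("supplemental index reg %2u at 0x%lx\n", i,
		       (unsigned long)sup_reply_post_reg(doorbell, i));
	return 0;
}
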
@@ -375,7 +376,6 @@ struct MPT3SAS_TARGET {
375 * per device private data 376 * per device private data
376 */ 377 */
377#define MPT_DEVICE_FLAGS_INIT 0x01 378#define MPT_DEVICE_FLAGS_INIT 0x01
378#define MPT_DEVICE_TLR_ON 0x02
379 379
380#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003) 380#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003)
381#define MFG_PAGE10_HIDE_ALL_DISKS (0x00) 381#define MFG_PAGE10_HIDE_ALL_DISKS (0x00)
@@ -736,7 +736,10 @@ typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
736typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc, 736typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
737 void *paddr); 737 void *paddr);
738 738
739 739/* To support atomic and non-atomic descriptors */
740typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
741 u16 funcdep);
742typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
740 743
741/* IOC Facts and Port Facts converted from little endian to cpu */ 744/* IOC Facts and Port Facts converted from little endian to cpu */
742union mpi3_version_union { 745union mpi3_version_union {
@@ -1079,6 +1082,9 @@ struct MPT3SAS_ADAPTER {
1079 void *pd_handles; 1082 void *pd_handles;
1080 u16 pd_handles_sz; 1083 u16 pd_handles_sz;
1081 1084
1085 void *pend_os_device_add;
1086 u16 pend_os_device_add_sz;
1087
1082 /* config page */ 1088 /* config page */
1083 u16 config_page_sz; 1089 u16 config_page_sz;
1084 void *config_page; 1090 void *config_page;
@@ -1156,7 +1162,8 @@ struct MPT3SAS_ADAPTER {
1156 u8 reply_queue_count; 1162 u8 reply_queue_count;
1157 struct list_head reply_queue_list; 1163 struct list_head reply_queue_list;
1158 1164
1159 u8 msix96_vector; 1165 u8 combined_reply_queue;
1166 u8 combined_reply_index_count;
1160 /* reply post register index */ 1167 /* reply post register index */
1161 resource_size_t **replyPostRegisterIndex; 1168 resource_size_t **replyPostRegisterIndex;
1162 1169
@@ -1187,6 +1194,15 @@ struct MPT3SAS_ADAPTER {
1187 struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event; 1194 struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
1188 struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi; 1195 struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
1189 struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi; 1196 struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
1197 void *device_remove_in_progress;
1198 u16 device_remove_in_progress_sz;
1199 u8 is_gen35_ioc;
1200 u8 atomic_desc_capable;
1201 PUT_SMID_IO_FP_HIP put_smid_scsi_io;
1202 PUT_SMID_IO_FP_HIP put_smid_fast_path;
1203 PUT_SMID_IO_FP_HIP put_smid_hi_priority;
1204 PUT_SMID_DEFAULT put_smid_default;
1205
1190}; 1206};
1191 1207
1192typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 1208typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1232,13 +1248,6 @@ u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
1232 1248
1233u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); 1249u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
1234void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid); 1250void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
1235void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
1236 u16 handle);
1237void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
1238 u16 handle);
1239void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
1240 u16 smid, u16 msix_task);
1241void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
1242void mpt3sas_base_initialize_callback_handler(void); 1251void mpt3sas_base_initialize_callback_handler(void);
1243u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func); 1252u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
1244void mpt3sas_base_release_callback_handler(u8 cb_idx); 1253void mpt3sas_base_release_callback_handler(u8 cb_idx);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index cebfd734fd76..dd6270125614 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -384,7 +384,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
384 memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t)); 384 memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
385 _config_display_some_debug(ioc, smid, "config_request", NULL); 385 _config_display_some_debug(ioc, smid, "config_request", NULL);
386 init_completion(&ioc->config_cmds.done); 386 init_completion(&ioc->config_cmds.done);
387 mpt3sas_base_put_smid_default(ioc, smid); 387 ioc->put_smid_default(ioc, smid);
388 wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); 388 wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
389 if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { 389 if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
390 pr_err(MPT3SAS_FMT "%s: timeout\n", 390 pr_err(MPT3SAS_FMT "%s: timeout\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 26cdc127ac89..050bd788ad02 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -654,6 +654,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
654 size_t data_in_sz = 0; 654 size_t data_in_sz = 0;
655 long ret; 655 long ret;
656 u16 wait_state_count; 656 u16 wait_state_count;
657 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
657 658
658 issue_reset = 0; 659 issue_reset = 0;
659 660
@@ -738,10 +739,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
738 data_in_sz = karg.data_in_size; 739 data_in_sz = karg.data_in_size;
739 740
740 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 741 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
741 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 742 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
742 if (!le16_to_cpu(mpi_request->FunctionDependent1) || 743 mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
743 le16_to_cpu(mpi_request->FunctionDependent1) > 744 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) {
744 ioc->facts.MaxDevHandle) { 745
746 device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
747 if (!device_handle || (device_handle >
748 ioc->facts.MaxDevHandle)) {
745 ret = -EINVAL; 749 ret = -EINVAL;
746 mpt3sas_base_free_smid(ioc, smid); 750 mpt3sas_base_free_smid(ioc, smid);
747 goto out; 751 goto out;
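
The widened check above pulls FunctionDependent1 out once, so SCSI I/O, RAID passthrough, task management and SATA passthrough all get the same 1..MaxDevHandle range test before a smid is committed. A compact sketch of the validation rule:

#include <stdbool.h>
#include <stdio.h>

/* Handle 0 is reserved; anything past MaxDevHandle cannot exist. */
static bool device_handle_valid(unsigned short handle,
				unsigned short max_dev_handle)
{
	return handle != 0 && handle <= max_dev_handle;
}

int main(void)
{
	printf("%d %d %d\n",
	       device_handle_valid(0, 1024),	 /* 0: reserved */
	       device_handle_valid(17, 1024),	 /* 1: in range */
	       device_handle_valid(2048, 1024)); /* 0: past MaxDevHandle */
	return 0;
}
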
@@ -797,14 +801,20 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
797 scsiio_request->SenseBufferLowAddress = 801 scsiio_request->SenseBufferLowAddress =
798 mpt3sas_base_get_sense_buffer_dma(ioc, smid); 802 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
799 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); 803 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
804 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
805 dtmprintk(ioc, pr_info(MPT3SAS_FMT
806 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
807 ioc->name, device_handle));
808 mpt3sas_base_free_smid(ioc, smid);
809 ret = -EINVAL;
810 goto out;
811 }
800 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 812 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
801 data_in_dma, data_in_sz); 813 data_in_dma, data_in_sz);
802
803 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) 814 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
804 mpt3sas_base_put_smid_scsi_io(ioc, smid, 815 ioc->put_smid_scsi_io(ioc, smid, device_handle);
805 le16_to_cpu(mpi_request->FunctionDependent1));
806 else 816 else
807 mpt3sas_base_put_smid_default(ioc, smid); 817 ioc->put_smid_default(ioc, smid);
808 break; 818 break;
809 } 819 }
810 case MPI2_FUNCTION_SCSI_TASK_MGMT: 820 case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -827,11 +837,19 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
827 } 837 }
828 } 838 }
829 839
840 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
841 dtmprintk(ioc, pr_info(MPT3SAS_FMT
842 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
843 ioc->name, device_handle));
844 mpt3sas_base_free_smid(ioc, smid);
845 ret = -EINVAL;
846 goto out;
847 }
830 mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu( 848 mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
831 tm_request->DevHandle)); 849 tm_request->DevHandle));
832 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 850 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
833 data_in_dma, data_in_sz); 851 data_in_dma, data_in_sz);
834 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); 852 ioc->put_smid_hi_priority(ioc, smid, 0);
835 break; 853 break;
836 } 854 }
837 case MPI2_FUNCTION_SMP_PASSTHROUGH: 855 case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -862,16 +880,30 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
862 } 880 }
863 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 881 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
864 data_in_sz); 882 data_in_sz);
865 mpt3sas_base_put_smid_default(ioc, smid); 883 ioc->put_smid_default(ioc, smid);
866 break; 884 break;
867 } 885 }
868 case MPI2_FUNCTION_SATA_PASSTHROUGH: 886 case MPI2_FUNCTION_SATA_PASSTHROUGH:
887 {
888 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
889 dtmprintk(ioc, pr_info(MPT3SAS_FMT
890 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
891 ioc->name, device_handle));
892 mpt3sas_base_free_smid(ioc, smid);
893 ret = -EINVAL;
894 goto out;
895 }
896 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
897 data_in_sz);
898 ioc->put_smid_default(ioc, smid);
899 break;
900 }
869 case MPI2_FUNCTION_FW_DOWNLOAD: 901 case MPI2_FUNCTION_FW_DOWNLOAD:
870 case MPI2_FUNCTION_FW_UPLOAD: 902 case MPI2_FUNCTION_FW_UPLOAD:
871 { 903 {
872 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 904 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
873 data_in_sz); 905 data_in_sz);
874 mpt3sas_base_put_smid_default(ioc, smid); 906 ioc->put_smid_default(ioc, smid);
875 break; 907 break;
876 } 908 }
877 case MPI2_FUNCTION_TOOLBOX: 909 case MPI2_FUNCTION_TOOLBOX:
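
Each passthrough case now consults the device_remove_in_progress bitmap before posting, failing fast with -EINVAL instead of racing a target reset in flight. A userspace sketch of the gate, with a plain byte-array bitmap standing in for the kernel's test_bit():

#include <stdio.h>

static unsigned char remove_in_progress[128];	/* room for 1024 handles */

static int bitmap_test(const unsigned char *map, unsigned int bit)
{
	return (map[bit / 8] >> (bit % 8)) & 1;
}

static int issue_passthrough(unsigned short handle)
{
	/* Same gate as the hunks above: refuse new passthrough I/O to a
	 * handle whose target reset is still outstanding. */
	if (bitmap_test(remove_in_progress, handle)) {
		fprintf(stderr,
			"handle(0x%04x): device removal in progress\n", handle);
		return -22;	/* -EINVAL */
	}
	/* ...build the SGL and post the request here... */
	return 0;
}

int main(void)
{
	remove_in_progress[17 / 8] |= 1u << (17 % 8);	/* mark handle 17 */
	printf("%d %d\n", issue_passthrough(16), issue_passthrough(17));
	return 0;
}
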
@@ -886,7 +918,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
886 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 918 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
887 data_in_dma, data_in_sz); 919 data_in_dma, data_in_sz);
888 } 920 }
889 mpt3sas_base_put_smid_default(ioc, smid); 921 ioc->put_smid_default(ioc, smid);
890 break; 922 break;
891 } 923 }
892 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: 924 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
@@ -905,7 +937,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
905 default: 937 default:
906 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 938 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
907 data_in_dma, data_in_sz); 939 data_in_dma, data_in_sz);
908 mpt3sas_base_put_smid_default(ioc, smid); 940 ioc->put_smid_default(ioc, smid);
909 break; 941 break;
910 } 942 }
911 943
@@ -1064,7 +1096,10 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1064 break; 1096 break;
1065 case MPI25_VERSION: 1097 case MPI25_VERSION:
1066 case MPI26_VERSION: 1098 case MPI26_VERSION:
1067 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; 1099 if (ioc->is_gen35_ioc)
1100 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
1101 else
1102 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
1068 strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); 1103 strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
1069 break; 1104 break;
1070 } 1105 }
@@ -1491,7 +1526,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1491 cpu_to_le32(ioc->product_specific[buffer_type][i]); 1526 cpu_to_le32(ioc->product_specific[buffer_type][i]);
1492 1527
1493 init_completion(&ioc->ctl_cmds.done); 1528 init_completion(&ioc->ctl_cmds.done);
1494 mpt3sas_base_put_smid_default(ioc, smid); 1529 ioc->put_smid_default(ioc, smid);
1495 wait_for_completion_timeout(&ioc->ctl_cmds.done, 1530 wait_for_completion_timeout(&ioc->ctl_cmds.done,
1496 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 1531 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1497 1532
@@ -1838,7 +1873,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
1838 mpi_request->VP_ID = 0; 1873 mpi_request->VP_ID = 0;
1839 1874
1840 init_completion(&ioc->ctl_cmds.done); 1875 init_completion(&ioc->ctl_cmds.done);
1841 mpt3sas_base_put_smid_default(ioc, smid); 1876 ioc->put_smid_default(ioc, smid);
1842 wait_for_completion_timeout(&ioc->ctl_cmds.done, 1877 wait_for_completion_timeout(&ioc->ctl_cmds.done,
1843 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 1878 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1844 1879
@@ -2105,7 +2140,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2105 mpi_request->VP_ID = 0; 2140 mpi_request->VP_ID = 0;
2106 2141
2107 init_completion(&ioc->ctl_cmds.done); 2142 init_completion(&ioc->ctl_cmds.done);
2108 mpt3sas_base_put_smid_default(ioc, smid); 2143 ioc->put_smid_default(ioc, smid);
2109 wait_for_completion_timeout(&ioc->ctl_cmds.done, 2144 wait_for_completion_timeout(&ioc->ctl_cmds.done,
2110 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 2145 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2111 2146
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 89408356d252..f3e17a8c1b07 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -143,6 +143,7 @@ struct mpt3_ioctl_pci_info {
143#define MPT2_IOCTL_INTERFACE_SAS2 (0x04) 143#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
144#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05) 144#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05)
145#define MPT3_IOCTL_INTERFACE_SAS3 (0x06) 145#define MPT3_IOCTL_INTERFACE_SAS3 (0x06)
146#define MPT3_IOCTL_INTERFACE_SAS35 (0x07)
146#define MPT2_IOCTL_VERSION_LENGTH (32) 147#define MPT2_IOCTL_VERSION_LENGTH (32)
147 148
148/** 149/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 1c4744e78173..5c8f75247d73 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -423,7 +423,7 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
423 return 0; 423 return 0;
424 } 424 }
425 425
426 /* we hit this becuase the given parent handle doesn't exist */ 426 /* we hit this because the given parent handle doesn't exist */
427 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 427 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
428 return -ENXIO; 428 return -ENXIO;
429 429
@@ -788,6 +788,11 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
788 list_add_tail(&sas_device->list, &ioc->sas_device_list); 788 list_add_tail(&sas_device->list, &ioc->sas_device_list);
789 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 789 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
790 790
791 if (ioc->hide_drives) {
792 clear_bit(sas_device->handle, ioc->pend_os_device_add);
793 return;
794 }
795
791 if (!mpt3sas_transport_port_add(ioc, sas_device->handle, 796 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
792 sas_device->sas_address_parent)) { 797 sas_device->sas_address_parent)) {
793 _scsih_sas_device_remove(ioc, sas_device); 798 _scsih_sas_device_remove(ioc, sas_device);
@@ -803,7 +808,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
803 sas_device->sas_address_parent); 808 sas_device->sas_address_parent);
804 _scsih_sas_device_remove(ioc, sas_device); 809 _scsih_sas_device_remove(ioc, sas_device);
805 } 810 }
806 } 811 } else
812 clear_bit(sas_device->handle, ioc->pend_os_device_add);
807} 813}
808 814
809/** 815/**
@@ -1517,7 +1523,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1517/* 1523/*
1518 * raid transport support - 1524 * raid transport support -
1519 * Enabled for SLES11 and newer, in older kernels the driver will panic when 1525 * Enabled for SLES11 and newer, in older kernels the driver will panic when
1520 * unloading the driver followed by a load - I beleive that the subroutine 1526 * unloading the driver followed by a load - I believe that the subroutine
1521 * raid_class_release() is not cleaning up properly. 1527 * raid_class_release() is not cleaning up properly.
1522 */ 1528 */
1523 1529
@@ -2279,7 +2285,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
2279 msix_task = scsi_lookup->msix_io; 2285 msix_task = scsi_lookup->msix_io;
2280 else 2286 else
2281 msix_task = 0; 2287 msix_task = 0;
2282 mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task); 2288 ioc->put_smid_hi_priority(ioc, smid, msix_task);
2283 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); 2289 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2284 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { 2290 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2285 pr_err(MPT3SAS_FMT "%s: timeout\n", 2291 pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -2837,7 +2843,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
2837 if (r == -EINVAL) 2843 if (r == -EINVAL)
2838 sdev_printk(KERN_WARNING, sdev, 2844 sdev_printk(KERN_WARNING, sdev,
2839 "device_block failed with return(%d) for handle(0x%04x)\n", 2845 "device_block failed with return(%d) for handle(0x%04x)\n",
2840 sas_device_priv_data->sas_target->handle, r); 2846 r, sas_device_priv_data->sas_target->handle);
2841} 2847}
2842 2848
2843/** 2849/**
@@ -2867,20 +2873,20 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
2867 sdev_printk(KERN_WARNING, sdev, 2873 sdev_printk(KERN_WARNING, sdev,
2868 "device_unblock failed with return(%d) for handle(0x%04x) " 2874 "device_unblock failed with return(%d) for handle(0x%04x) "
2869 "performing a block followed by an unblock\n", 2875 "performing a block followed by an unblock\n",
2870 sas_device_priv_data->sas_target->handle, r); 2876 r, sas_device_priv_data->sas_target->handle);
2871 sas_device_priv_data->block = 1; 2877 sas_device_priv_data->block = 1;
2872 r = scsi_internal_device_block(sdev); 2878 r = scsi_internal_device_block(sdev);
2873 if (r) 2879 if (r)
2874 sdev_printk(KERN_WARNING, sdev, "retried device_block " 2880 sdev_printk(KERN_WARNING, sdev, "retried device_block "
2875 "failed with return(%d) for handle(0x%04x)\n", 2881 "failed with return(%d) for handle(0x%04x)\n",
2876 sas_device_priv_data->sas_target->handle, r); 2882 r, sas_device_priv_data->sas_target->handle);
2877 2883
2878 sas_device_priv_data->block = 0; 2884 sas_device_priv_data->block = 0;
2879 r = scsi_internal_device_unblock(sdev, SDEV_RUNNING); 2885 r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
2880 if (r) 2886 if (r)
2881 sdev_printk(KERN_WARNING, sdev, "retried device_unblock" 2887 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
2882 " failed with return(%d) for handle(0x%04x)\n", 2888 " failed with return(%d) for handle(0x%04x)\n",
2883 sas_device_priv_data->sas_target->handle, r); 2889 r, sas_device_priv_data->sas_target->handle);
2884 } 2890 }
2885} 2891}
2886 2892
@@ -2942,7 +2948,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
2942 * @ioc: per adapter object 2948 * @ioc: per adapter object
2943 * @handle: device handle 2949 * @handle: device handle
2944 * 2950 *
2945 * During device pull we need to appropiately set the sdev state. 2951 * During device pull we need to appropriately set the sdev state.
2946 */ 2952 */
2947static void 2953static void
2948_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc) 2954_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
@@ -2971,7 +2977,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
2971 * @ioc: per adapter object 2977 * @ioc: per adapter object
2972 * @handle: device handle 2978 * @handle: device handle
2973 * 2979 *
2974 * During device pull we need to appropiately set the sdev state. 2980 * During device pull we need to appropriately set the sdev state.
2975 */ 2981 */
2976static void 2982static void
2977_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 2983_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -3138,6 +3144,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3138 if (test_bit(handle, ioc->pd_handles)) 3144 if (test_bit(handle, ioc->pd_handles))
3139 return; 3145 return;
3140 3146
3147 clear_bit(handle, ioc->pend_os_device_add);
3148
3141 spin_lock_irqsave(&ioc->sas_device_lock, flags); 3149 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3142 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); 3150 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3143 if (sas_device && sas_device->starget && 3151 if (sas_device && sas_device->starget &&
@@ -3192,7 +3200,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3192 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 3200 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3193 mpi_request->DevHandle = cpu_to_le16(handle); 3201 mpi_request->DevHandle = cpu_to_le16(handle);
3194 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 3202 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3195 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); 3203 set_bit(handle, ioc->device_remove_in_progress);
3204 ioc->put_smid_hi_priority(ioc, smid, 0);
3196 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); 3205 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3197 3206
3198out: 3207out:
@@ -3291,7 +3300,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3291 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 3300 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3292 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 3301 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3293 mpi_request->DevHandle = mpi_request_tm->DevHandle; 3302 mpi_request->DevHandle = mpi_request_tm->DevHandle;
3294 mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl); 3303 ioc->put_smid_default(ioc, smid_sas_ctrl);
3295 3304
3296 return _scsih_check_for_pending_tm(ioc, smid); 3305 return _scsih_check_for_pending_tm(ioc, smid);
3297} 3306}
@@ -3326,6 +3335,11 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3326 ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid, 3335 ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
3327 le16_to_cpu(mpi_reply->IOCStatus), 3336 le16_to_cpu(mpi_reply->IOCStatus),
3328 le32_to_cpu(mpi_reply->IOCLogInfo))); 3337 le32_to_cpu(mpi_reply->IOCLogInfo)));
3338 if (le16_to_cpu(mpi_reply->IOCStatus) ==
3339 MPI2_IOCSTATUS_SUCCESS) {
3340 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
3341 ioc->device_remove_in_progress);
3342 }
3329 } else { 3343 } else {
3330 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", 3344 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
3331 ioc->name, __FILE__, __LINE__, __func__); 3345 ioc->name, __FILE__, __LINE__, __func__);
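
Taken together with the set_bit() in _scsih_tm_tr_send above, this gives each handle a simple lifecycle: mark it when the target reset is queued, clear it only once firmware reports the follow-up SAS I/O unit control succeeded. A sketch of that pairing, with a byte-array bitmap standing in for the kernel bitops:

#include <stdio.h>

static unsigned char remove_in_progress[128];

static void bit_set(unsigned char *m, unsigned int b)
{
	m[b / 8] |= 1u << (b % 8);
}

static void bit_clear(unsigned char *m, unsigned int b)
{
	m[b / 8] &= ~(1u << (b % 8));
}

static int bit_test(const unsigned char *m, unsigned int b)
{
	return (m[b / 8] >> (b % 8)) & 1;
}

/* Queue the target reset: the handle is now off-limits to new I/O. */
static void tm_tr_send(unsigned short handle)
{
	bit_set(remove_in_progress, handle);
}

/* Firmware acknowledged the follow-up REMOVE_DEVICE: handle reusable. */
static void sas_control_complete(unsigned short handle, int ioc_success)
{
	if (ioc_success)
		bit_clear(remove_in_progress, handle);
}

int main(void)
{
	tm_tr_send(17);
	printf("pending: %d\n", bit_test(remove_in_progress, 17));	/* 1 */
	sas_control_complete(17, 1);
	printf("pending: %d\n", bit_test(remove_in_progress, 17));	/* 0 */
	return 0;
}
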
@@ -3381,7 +3395,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3381 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 3395 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3382 mpi_request->DevHandle = cpu_to_le16(handle); 3396 mpi_request->DevHandle = cpu_to_le16(handle);
3383 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 3397 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3384 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); 3398 ioc->put_smid_hi_priority(ioc, smid, 0);
3385} 3399}
3386 3400
3387/** 3401/**
@@ -3473,7 +3487,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
3473 ack_request->EventContext = event_context; 3487 ack_request->EventContext = event_context;
3474 ack_request->VF_ID = 0; /* TODO */ 3488 ack_request->VF_ID = 0; /* TODO */
3475 ack_request->VP_ID = 0; 3489 ack_request->VP_ID = 0;
3476 mpt3sas_base_put_smid_default(ioc, smid); 3490 ioc->put_smid_default(ioc, smid);
3477} 3491}
3478 3492
3479/** 3493/**
@@ -3530,7 +3544,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
3530 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 3544 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3531 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 3545 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3532 mpi_request->DevHandle = handle; 3546 mpi_request->DevHandle = handle;
3533 mpt3sas_base_put_smid_default(ioc, smid); 3547 ioc->put_smid_default(ioc, smid);
3534} 3548}
3535 3549
3536/** 3550/**
@@ -3930,7 +3944,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
3930 * _scsih_setup_eedp - setup MPI request for EEDP transfer 3944 * _scsih_setup_eedp - setup MPI request for EEDP transfer
3931 * @ioc: per adapter object 3945 * @ioc: per adapter object
3932 * @scmd: pointer to scsi command object 3946 * @scmd: pointer to scsi command object
3933 * @mpi_request: pointer to the SCSI_IO reqest message frame 3947 * @mpi_request: pointer to the SCSI_IO request message frame
3934 * 3948 *
3935 * Supporting protection 1 and 3. 3949 * Supporting protection 1 and 3.
3936 * 3950 *
@@ -3983,6 +3997,9 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3983 3997
3984 mpi_request_3v->EEDPBlockSize = 3998 mpi_request_3v->EEDPBlockSize =
3985 cpu_to_le16(scmd->device->sector_size); 3999 cpu_to_le16(scmd->device->sector_size);
4000
4001 if (ioc->is_gen35_ioc)
4002 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
3986 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); 4003 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
3987} 4004}
3988 4005
@@ -4084,7 +4101,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4084 scmd->result = DID_NO_CONNECT << 16; 4101 scmd->result = DID_NO_CONNECT << 16;
4085 scmd->scsi_done(scmd); 4102 scmd->scsi_done(scmd);
4086 return 0; 4103 return 0;
4087 /* device busy with task managment */ 4104 /* device busy with task management */
4088 } else if (sas_target_priv_data->tm_busy || 4105 } else if (sas_target_priv_data->tm_busy ||
4089 sas_device_priv_data->block) 4106 sas_device_priv_data->block)
4090 return SCSI_MLQUEUE_DEVICE_BUSY; 4107 return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -4154,12 +4171,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4154 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { 4171 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
4155 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | 4172 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
4156 MPI25_SCSIIO_IOFLAGS_FAST_PATH); 4173 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
4157 mpt3sas_base_put_smid_fast_path(ioc, smid, handle); 4174 ioc->put_smid_fast_path(ioc, smid, handle);
4158 } else 4175 } else
4159 mpt3sas_base_put_smid_scsi_io(ioc, smid, 4176 ioc->put_smid_scsi_io(ioc, smid,
4160 le16_to_cpu(mpi_request->DevHandle)); 4177 le16_to_cpu(mpi_request->DevHandle));
4161 } else 4178 } else
4162 mpt3sas_base_put_smid_default(ioc, smid); 4179 ioc->put_smid_default(ioc, smid);
4163 return 0; 4180 return 0;
4164 4181
4165 out: 4182 out:
@@ -4658,7 +4675,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4658 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 4675 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4659 mpi_request->DevHandle = 4676 mpi_request->DevHandle =
4660 cpu_to_le16(sas_device_priv_data->sas_target->handle); 4677 cpu_to_le16(sas_device_priv_data->sas_target->handle);
4661 mpt3sas_base_put_smid_scsi_io(ioc, smid, 4678 ioc->put_smid_scsi_io(ioc, smid,
4662 sas_device_priv_data->sas_target->handle); 4679 sas_device_priv_data->sas_target->handle);
4663 return 0; 4680 return 0;
4664 } 4681 }
@@ -5383,10 +5400,10 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5383 sas_device->handle, handle); 5400 sas_device->handle, handle);
5384 sas_target_priv_data->handle = handle; 5401 sas_target_priv_data->handle = handle;
5385 sas_device->handle = handle; 5402 sas_device->handle = handle;
5386 if (sas_device_pg0.Flags & 5403 if (le16_to_cpu(sas_device_pg0.Flags) &
5387 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 5404 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5388 sas_device->enclosure_level = 5405 sas_device->enclosure_level =
5389 le16_to_cpu(sas_device_pg0.EnclosureLevel); 5406 sas_device_pg0.EnclosureLevel;
5390 memcpy(sas_device->connector_name, 5407 memcpy(sas_device->connector_name,
5391 sas_device_pg0.ConnectorName, 4); 5408 sas_device_pg0.ConnectorName, 4);
5392 sas_device->connector_name[4] = '\0'; 5409 sas_device->connector_name[4] = '\0';
@@ -5465,6 +5482,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
5465 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 5482 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
5466 if (!(_scsih_is_end_device(device_info))) 5483 if (!(_scsih_is_end_device(device_info)))
5467 return -1; 5484 return -1;
5485 set_bit(handle, ioc->pend_os_device_add);
5468 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 5486 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5469 5487
5470 /* check if device is present */ 5488 /* check if device is present */
@@ -5483,6 +5501,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
5483 sas_device = mpt3sas_get_sdev_by_addr(ioc, 5501 sas_device = mpt3sas_get_sdev_by_addr(ioc,
5484 sas_address); 5502 sas_address);
5485 if (sas_device) { 5503 if (sas_device) {
5504 clear_bit(handle, ioc->pend_os_device_add);
5486 sas_device_put(sas_device); 5505 sas_device_put(sas_device);
5487 return -1; 5506 return -1;
5488 } 5507 }
@@ -5513,9 +5532,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
5513 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & 5532 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
5514 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; 5533 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
5515 5534
5516 if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 5535 if (le16_to_cpu(sas_device_pg0.Flags)
5536 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5517 sas_device->enclosure_level = 5537 sas_device->enclosure_level =
5518 le16_to_cpu(sas_device_pg0.EnclosureLevel); 5538 sas_device_pg0.EnclosureLevel;
5519 memcpy(sas_device->connector_name, 5539 memcpy(sas_device->connector_name,
5520 sas_device_pg0.ConnectorName, 4); 5540 sas_device_pg0.ConnectorName, 4);
5521 sas_device->connector_name[4] = '\0'; 5541 sas_device->connector_name[4] = '\0';
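
Both halves of this hunk fix the same endianness slip in opposite directions: Flags is a little-endian 16-bit field, so the bit test must go through le16_to_cpu() first, while EnclosureLevel is a single byte and must not be swapped (the old le16_to_cpu() on it would mangle the value on big-endian hosts). A host-order sketch of the rule, with an assumed flag value:

#include <stdint.h>
#include <stdio.h>

#define FLAGS_ENCL_LEVEL_VALID 0x2000	/* assumed bit, for illustration */

/* Assemble a host-order value from little-endian bytes; this works
 * identically on big- and little-endian hosts, which is the point of
 * the kernel's le16_to_cpu(). */
static uint16_t le16_to_host(const uint8_t le[2])
{
	return (uint16_t)(le[0] | (le[1] << 8));
}

int main(void)
{
	uint8_t flags_le[2] = { 0x00, 0x20 };	/* 0x2000, stored LE */
	uint8_t enclosure_level = 3;		/* single byte: never swapped */

	if (le16_to_host(flags_le) & FLAGS_ENCL_LEVEL_VALID)
		printf("enclosure_level = %u\n", enclosure_level);
	return 0;
}
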
@@ -5806,6 +5826,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
5806 _scsih_check_device(ioc, sas_address, handle, 5826 _scsih_check_device(ioc, sas_address, handle,
5807 phy_number, link_rate); 5827 phy_number, link_rate);
5808 5828
5829 if (!test_bit(handle, ioc->pend_os_device_add))
5830 break;
5831
5809 5832
5810 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: 5833 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
5811 5834
@@ -6267,7 +6290,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
6267 handle, phys_disk_num)); 6290 handle, phys_disk_num));
6268 6291
6269 init_completion(&ioc->scsih_cmds.done); 6292 init_completion(&ioc->scsih_cmds.done);
6270 mpt3sas_base_put_smid_default(ioc, smid); 6293 ioc->put_smid_default(ioc, smid);
6271 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); 6294 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
6272 6295
6273 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { 6296 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -6320,7 +6343,7 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
6320{ 6343{
6321 sdev->no_uld_attach = no_uld_attach ? 1 : 0; 6344 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
6322 sdev_printk(KERN_INFO, sdev, "%s raid component\n", 6345 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
6323 sdev->no_uld_attach ? "hidding" : "exposing"); 6346 sdev->no_uld_attach ? "hiding" : "exposing");
6324 WARN_ON(scsi_device_reprobe(sdev)); 6347 WARN_ON(scsi_device_reprobe(sdev));
6325} 6348}
6326 6349
@@ -7050,7 +7073,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
7050 if (sas_device_pg0->Flags & 7073 if (sas_device_pg0->Flags &
7051 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 7074 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7052 sas_device->enclosure_level = 7075 sas_device->enclosure_level =
7053 le16_to_cpu(sas_device_pg0->EnclosureLevel); 7076 sas_device_pg0->EnclosureLevel;
7054 memcpy(&sas_device->connector_name[0], 7077 memcpy(&sas_device->connector_name[0],
7055 &sas_device_pg0->ConnectorName[0], 4); 7078 &sas_device_pg0->ConnectorName[0], 4);
7056 } else { 7079 } else {
@@ -7112,6 +7135,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
7112 sas_device_pg0.SASAddress = 7135 sas_device_pg0.SASAddress =
7113 le64_to_cpu(sas_device_pg0.SASAddress); 7136 le64_to_cpu(sas_device_pg0.SASAddress);
7114 sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot); 7137 sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
7138 sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags);
7115 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0); 7139 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
7116 } 7140 }
7117 7141
@@ -7723,6 +7747,9 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
7723 complete(&ioc->tm_cmds.done); 7747 complete(&ioc->tm_cmds.done);
7724 } 7748 }
7725 7749
7750 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
7751 memset(ioc->device_remove_in_progress, 0,
7752 ioc->device_remove_in_progress_sz);
7726 _scsih_fw_event_cleanup_queue(ioc); 7753 _scsih_fw_event_cleanup_queue(ioc);
7727 _scsih_flush_running_cmds(ioc); 7754 _scsih_flush_running_cmds(ioc);
7728 break; 7755 break;
@@ -8113,7 +8140,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
8113 if (!ioc->hide_ir_msg) 8140 if (!ioc->hide_ir_msg)
8114 pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name); 8141 pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
8115 init_completion(&ioc->scsih_cmds.done); 8142 init_completion(&ioc->scsih_cmds.done);
8116 mpt3sas_base_put_smid_default(ioc, smid); 8143 ioc->put_smid_default(ioc, smid);
8117 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); 8144 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
8118 8145
8119 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { 8146 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -8654,6 +8681,12 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
8654 case MPI26_MFGPAGE_DEVID_SAS3324_2: 8681 case MPI26_MFGPAGE_DEVID_SAS3324_2:
8655 case MPI26_MFGPAGE_DEVID_SAS3324_3: 8682 case MPI26_MFGPAGE_DEVID_SAS3324_3:
8656 case MPI26_MFGPAGE_DEVID_SAS3324_4: 8683 case MPI26_MFGPAGE_DEVID_SAS3324_4:
8684 case MPI26_MFGPAGE_DEVID_SAS3508:
8685 case MPI26_MFGPAGE_DEVID_SAS3508_1:
8686 case MPI26_MFGPAGE_DEVID_SAS3408:
8687 case MPI26_MFGPAGE_DEVID_SAS3516:
8688 case MPI26_MFGPAGE_DEVID_SAS3516_1:
8689 case MPI26_MFGPAGE_DEVID_SAS3416:
8657 return MPI26_VERSION; 8690 return MPI26_VERSION;
8658 } 8691 }
8659 return 0; 8692 return 0;
@@ -8722,10 +8755,29 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8722 ioc->hba_mpi_version_belonged = hba_mpi_version; 8755 ioc->hba_mpi_version_belonged = hba_mpi_version;
8723 ioc->id = mpt3_ids++; 8756 ioc->id = mpt3_ids++;
8724 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); 8757 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
8758 switch (pdev->device) {
8759 case MPI26_MFGPAGE_DEVID_SAS3508:
8760 case MPI26_MFGPAGE_DEVID_SAS3508_1:
8761 case MPI26_MFGPAGE_DEVID_SAS3408:
8762 case MPI26_MFGPAGE_DEVID_SAS3516:
8763 case MPI26_MFGPAGE_DEVID_SAS3516_1:
8764 case MPI26_MFGPAGE_DEVID_SAS3416:
8765 ioc->is_gen35_ioc = 1;
8766 break;
8767 default:
8768 ioc->is_gen35_ioc = 0;
8769 }
8725 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION && 8770 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
8726 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || 8771 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
8727 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) 8772 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
8728 ioc->msix96_vector = 1; 8773 ioc->combined_reply_queue = 1;
8774 if (ioc->is_gen35_ioc)
8775 ioc->combined_reply_index_count =
8776 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
8777 else
8778 ioc->combined_reply_index_count =
8779 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
8780 }
8729 break; 8781 break;
8730 default: 8782 default:
8731 return -ENODEV; 8783 return -ENODEV;
@@ -9128,6 +9180,19 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
9128 PCI_ANY_ID, PCI_ANY_ID }, 9180 PCI_ANY_ID, PCI_ANY_ID },
9129 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4, 9181 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
9130 PCI_ANY_ID, PCI_ANY_ID }, 9182 PCI_ANY_ID, PCI_ANY_ID },
9183 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */
9184 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
9185 PCI_ANY_ID, PCI_ANY_ID },
9186 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
9187 PCI_ANY_ID, PCI_ANY_ID },
9188 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
9189 PCI_ANY_ID, PCI_ANY_ID },
9190 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
9191 PCI_ANY_ID, PCI_ANY_ID },
9192 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
9193 PCI_ANY_ID, PCI_ANY_ID },
9194 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
9195 PCI_ANY_ID, PCI_ANY_ID },
9131 {0} /* Terminating entry */ 9196 {0} /* Terminating entry */
9132}; 9197};
9133MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table); 9198MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
@@ -9168,7 +9233,7 @@ scsih_init(void)
9168 /* queuecommand callback handler */ 9233 /* queuecommand callback handler */
9169 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done); 9234 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
9170 9235
9171 /* task managment callback handler */ 9236 /* task management callback handler */
9172 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done); 9237 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
9173 9238
9174 /* base internal commands callback handler */ 9239 /* base internal commands callback handler */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index b74faf1a69b2..7f1d5785bc30 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -392,7 +392,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
392 "report_manufacture - send to sas_addr(0x%016llx)\n", 392 "report_manufacture - send to sas_addr(0x%016llx)\n",
393 ioc->name, (unsigned long long)sas_address)); 393 ioc->name, (unsigned long long)sas_address));
394 init_completion(&ioc->transport_cmds.done); 394 init_completion(&ioc->transport_cmds.done);
395 mpt3sas_base_put_smid_default(ioc, smid); 395 ioc->put_smid_default(ioc, smid);
396 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); 396 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
397 397
398 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { 398 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1198,7 +1198,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
1198 ioc->name, (unsigned long long)phy->identify.sas_address, 1198 ioc->name, (unsigned long long)phy->identify.sas_address,
1199 phy->number)); 1199 phy->number));
1200 init_completion(&ioc->transport_cmds.done); 1200 init_completion(&ioc->transport_cmds.done);
1201 mpt3sas_base_put_smid_default(ioc, smid); 1201 ioc->put_smid_default(ioc, smid);
1202 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); 1202 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
1203 1203
1204 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { 1204 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1514,7 +1514,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
1514 ioc->name, (unsigned long long)phy->identify.sas_address, 1514 ioc->name, (unsigned long long)phy->identify.sas_address,
1515 phy->number, phy_operation)); 1515 phy->number, phy_operation));
1516 init_completion(&ioc->transport_cmds.done); 1516 init_completion(&ioc->transport_cmds.done);
1517 mpt3sas_base_put_smid_default(ioc, smid); 1517 ioc->put_smid_default(ioc, smid);
1518 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); 1518 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
1519 1519
1520 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { 1520 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2032,7 +2032,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2032 "%s - sending smp request\n", ioc->name, __func__)); 2032 "%s - sending smp request\n", ioc->name, __func__));
2033 2033
2034 init_completion(&ioc->transport_cmds.done); 2034 init_completion(&ioc->transport_cmds.done);
2035 mpt3sas_base_put_smid_default(ioc, smid); 2035 ioc->put_smid_default(ioc, smid);
2036 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); 2036 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
2037 2037
2038 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { 2038 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 4c57d9abce7b..7de5d8d75480 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -668,7 +668,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
668{ 668{
669 u32 tmp; 669 u32 tmp;
670 tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3)); 670 tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
671 if (tmp && 1 << (slot_idx % 32)) { 671 if (tmp & 1 << (slot_idx % 32)) {
672 mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx); 672 mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
673 mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3), 673 mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
674 1 << (slot_idx % 32)); 674 1 << (slot_idx % 32));
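
The mvsas change is a one-character bug class worth spelling out: tmp && 1 << (slot_idx % 32) is a logical AND, true whenever tmp is non-zero regardless of the slot, while tmp & 1 << (slot_idx % 32) actually tests the slot's bit (<< binds tighter than &). A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int tmp = 0x4;	/* only bit 2 set */
	int n = 5;

	/* Logical AND: true whenever tmp is non-zero at all. */
	printf("tmp && 1 << n = %d\n", tmp && 1 << n);		/* 1 */
	/* Bitwise AND: actually tests bit n of tmp. */
	printf("tmp &  1 << n = %u\n", tmp & 1u << n);		/* 0 */
	return 0;
}
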
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 845affa112f7..337982cf3d63 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3787,11 +3787,11 @@ static long pmcraid_ioctl_passthrough(
3787 direction); 3787 direction);
3788 if (rc) { 3788 if (rc) {
3789 pmcraid_err("couldn't build passthrough ioadls\n"); 3789 pmcraid_err("couldn't build passthrough ioadls\n");
3790 goto out_free_buffer; 3790 goto out_free_cmd;
3791 } 3791 }
3792 } else if (request_size < 0) { 3792 } else if (request_size < 0) {
3793 rc = -EINVAL; 3793 rc = -EINVAL;
3794 goto out_free_buffer; 3794 goto out_free_cmd;
3795 } 3795 }
3796 3796
3797 /* If data is being written into the device, copy the data from user 3797 /* If data is being written into the device, copy the data from user
@@ -3908,6 +3908,8 @@ out_handle_response:
3908 3908
3909out_free_sglist: 3909out_free_sglist:
3910 pmcraid_release_passthrough_ioadls(cmd, request_size, direction); 3910 pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
3911
3912out_free_cmd:
3911 pmcraid_return_cmd(cmd); 3913 pmcraid_return_cmd(cmd);
3912 3914
3913out_free_buffer: 3915out_free_buffer:
@@ -6018,8 +6020,10 @@ static int __init pmcraid_init(void)
6018 6020
6019 error = pmcraid_netlink_init(); 6021 error = pmcraid_netlink_init();
6020 6022
6021 if (error) 6023 if (error) {
6024 class_destroy(pmcraid_class);
6022 goto out_unreg_chrdev; 6025 goto out_unreg_chrdev;
6026 }
6023 6027
6024 error = pci_register_driver(&pmcraid_driver); 6028 error = pci_register_driver(&pmcraid_driver);
6025 6029
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 643014f82f7d..1bf8061ff803 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,4 +1,4 @@
1/* 1 /*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
@@ -9,6 +9,7 @@
9#include <linux/kthread.h> 9#include <linux/kthread.h>
10#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/bsg-lib.h>
12 13
13/* BSG support for ELS/CT pass through */ 14/* BSG support for ELS/CT pass through */
14void 15void
@@ -16,10 +17,12 @@ qla2x00_bsg_job_done(void *data, void *ptr, int res)
16{ 17{
17 srb_t *sp = (srb_t *)ptr; 18 srb_t *sp = (srb_t *)ptr;
18 struct scsi_qla_host *vha = (scsi_qla_host_t *)data; 19 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
19 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 20 struct bsg_job *bsg_job = sp->u.bsg_job;
21 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
20 22
21 bsg_job->reply->result = res; 23 bsg_reply->result = res;
22 bsg_job->job_done(bsg_job); 24 bsg_job_done(bsg_job, bsg_reply->result,
25 bsg_reply->reply_payload_rcv_len);
23 sp->free(vha, sp); 26 sp->free(vha, sp);
24} 27}
25 28
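
[editor's note] The qla_bsg.c changes from here on are all one migration: the FC-transport-private struct fc_bsg_job, which carried its own job_done() completion method, becomes the generic block-layer struct bsg_job, completed through bsg_job_done(job, result, payload_rcv_len); the request and reply now hang off bsg_job->request and bsg_job->reply and are cached in local bsg_request/bsg_reply pointers at the top of each handler. A schematic of the converted shape — a hedged sketch with stand-in types, not the real kernel headers:

    #include <stdio.h>

    /* stand-ins for the kernel's fc_bsg_reply and bsg_job */
    struct fc_bsg_reply { int result; unsigned int reply_payload_rcv_len; };
    struct bsg_job      { struct fc_bsg_reply *reply; };

    /* stand-in for the block layer's bsg_job_done(): completes the request */
    static void bsg_job_done(struct bsg_job *job, int result,
                             unsigned int payload_rcv_len)
    {
            (void)job;
            printf("completed: result=%d rcv_len=%u\n", result, payload_rcv_len);
    }

    static void handler(struct bsg_job *bsg_job)
    {
            struct fc_bsg_reply *bsg_reply = bsg_job->reply;  /* cached once */

            bsg_reply->result = 0;  /* DID_OK */
            bsg_job_done(bsg_job, bsg_reply->result,
                         bsg_reply->reply_payload_rcv_len); /* was bsg_job->job_done(bsg_job) */
    }

    int main(void)
    {
            struct fc_bsg_reply reply = { 0, 0 };
            struct bsg_job job = { &reply };

            handler(&job);
            return 0;
    }
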
@@ -28,13 +31,15 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
28{ 31{
29 srb_t *sp = (srb_t *)ptr; 32 srb_t *sp = (srb_t *)ptr;
30 struct scsi_qla_host *vha = sp->fcport->vha; 33 struct scsi_qla_host *vha = sp->fcport->vha;
31 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 34 struct bsg_job *bsg_job = sp->u.bsg_job;
35 struct fc_bsg_request *bsg_request = bsg_job->request;
36
32 struct qla_hw_data *ha = vha->hw; 37 struct qla_hw_data *ha = vha->hw;
33 struct qla_mt_iocb_rqst_fx00 *piocb_rqst; 38 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
34 39
35 if (sp->type == SRB_FXIOCB_BCMD) { 40 if (sp->type == SRB_FXIOCB_BCMD) {
36 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 41 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
37 &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 42 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
38 43
39 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) 44 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
40 dma_unmap_sg(&ha->pdev->dev, 45 dma_unmap_sg(&ha->pdev->dev,
@@ -116,9 +121,11 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
116} 121}
117 122
118static int 123static int
119qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) 124qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
120{ 125{
121 struct Scsi_Host *host = bsg_job->shost; 126 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
127 struct fc_bsg_request *bsg_request = bsg_job->request;
128 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
122 scsi_qla_host_t *vha = shost_priv(host); 129 scsi_qla_host_t *vha = shost_priv(host);
123 struct qla_hw_data *ha = vha->hw; 130 struct qla_hw_data *ha = vha->hw;
124 int ret = 0; 131 int ret = 0;
@@ -131,7 +138,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
131 } 138 }
132 139
133 /* Get the sub command */ 140 /* Get the sub command */
134 oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 141 oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
135 142
136 /* Only set config is allowed if config memory is not allocated */ 143 /* Only set config is allowed if config memory is not allocated */
137 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) { 144 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
@@ -145,10 +152,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
145 ha->fcp_prio_cfg->attributes &= 152 ha->fcp_prio_cfg->attributes &=
146 ~FCP_PRIO_ATTR_ENABLE; 153 ~FCP_PRIO_ATTR_ENABLE;
147 qla24xx_update_all_fcp_prio(vha); 154 qla24xx_update_all_fcp_prio(vha);
148 bsg_job->reply->result = DID_OK; 155 bsg_reply->result = DID_OK;
149 } else { 156 } else {
150 ret = -EINVAL; 157 ret = -EINVAL;
151 bsg_job->reply->result = (DID_ERROR << 16); 158 bsg_reply->result = (DID_ERROR << 16);
152 goto exit_fcp_prio_cfg; 159 goto exit_fcp_prio_cfg;
153 } 160 }
154 break; 161 break;
@@ -160,10 +167,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
160 ha->fcp_prio_cfg->attributes |= 167 ha->fcp_prio_cfg->attributes |=
161 FCP_PRIO_ATTR_ENABLE; 168 FCP_PRIO_ATTR_ENABLE;
162 qla24xx_update_all_fcp_prio(vha); 169 qla24xx_update_all_fcp_prio(vha);
163 bsg_job->reply->result = DID_OK; 170 bsg_reply->result = DID_OK;
164 } else { 171 } else {
165 ret = -EINVAL; 172 ret = -EINVAL;
166 bsg_job->reply->result = (DID_ERROR << 16); 173 bsg_reply->result = (DID_ERROR << 16);
167 goto exit_fcp_prio_cfg; 174 goto exit_fcp_prio_cfg;
168 } 175 }
169 } 176 }
@@ -173,12 +180,12 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
173 len = bsg_job->reply_payload.payload_len; 180 len = bsg_job->reply_payload.payload_len;
174 if (!len || len > FCP_PRIO_CFG_SIZE) { 181 if (!len || len > FCP_PRIO_CFG_SIZE) {
175 ret = -EINVAL; 182 ret = -EINVAL;
176 bsg_job->reply->result = (DID_ERROR << 16); 183 bsg_reply->result = (DID_ERROR << 16);
177 goto exit_fcp_prio_cfg; 184 goto exit_fcp_prio_cfg;
178 } 185 }
179 186
180 bsg_job->reply->result = DID_OK; 187 bsg_reply->result = DID_OK;
181 bsg_job->reply->reply_payload_rcv_len = 188 bsg_reply->reply_payload_rcv_len =
182 sg_copy_from_buffer( 189 sg_copy_from_buffer(
183 bsg_job->reply_payload.sg_list, 190 bsg_job->reply_payload.sg_list,
184 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg, 191 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
@@ -189,7 +196,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
189 case QLFC_FCP_PRIO_SET_CONFIG: 196 case QLFC_FCP_PRIO_SET_CONFIG:
190 len = bsg_job->request_payload.payload_len; 197 len = bsg_job->request_payload.payload_len;
191 if (!len || len > FCP_PRIO_CFG_SIZE) { 198 if (!len || len > FCP_PRIO_CFG_SIZE) {
192 bsg_job->reply->result = (DID_ERROR << 16); 199 bsg_reply->result = (DID_ERROR << 16);
193 ret = -EINVAL; 200 ret = -EINVAL;
194 goto exit_fcp_prio_cfg; 201 goto exit_fcp_prio_cfg;
195 } 202 }
@@ -200,7 +207,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
200 ql_log(ql_log_warn, vha, 0x7050, 207 ql_log(ql_log_warn, vha, 0x7050,
201 "Unable to allocate memory for fcp prio " 208 "Unable to allocate memory for fcp prio "
202 "config data (%x).\n", FCP_PRIO_CFG_SIZE); 209 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
203 bsg_job->reply->result = (DID_ERROR << 16); 210 bsg_reply->result = (DID_ERROR << 16);
204 ret = -ENOMEM; 211 ret = -ENOMEM;
205 goto exit_fcp_prio_cfg; 212 goto exit_fcp_prio_cfg;
206 } 213 }
@@ -215,7 +222,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
215 222
216 if (!qla24xx_fcp_prio_cfg_valid(vha, 223 if (!qla24xx_fcp_prio_cfg_valid(vha,
217 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) { 224 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
218 bsg_job->reply->result = (DID_ERROR << 16); 225 bsg_reply->result = (DID_ERROR << 16);
219 ret = -EINVAL; 226 ret = -EINVAL;
220 /* If buffer was invalidated then 227 /* If buffer was invalidated then

221 * fcp_prio_cfg is of no use 228 * fcp_prio_cfg is of no use
@@ -229,7 +236,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
229 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE) 236 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
230 ha->flags.fcp_prio_enabled = 1; 237 ha->flags.fcp_prio_enabled = 1;
231 qla24xx_update_all_fcp_prio(vha); 238 qla24xx_update_all_fcp_prio(vha);
232 bsg_job->reply->result = DID_OK; 239 bsg_reply->result = DID_OK;
233 break; 240 break;
234 default: 241 default:
235 ret = -EINVAL; 242 ret = -EINVAL;
@@ -237,13 +244,15 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
237 } 244 }
238exit_fcp_prio_cfg: 245exit_fcp_prio_cfg:
239 if (!ret) 246 if (!ret)
240 bsg_job->job_done(bsg_job); 247 bsg_job_done(bsg_job, bsg_reply->result,
248 bsg_reply->reply_payload_rcv_len);
241 return ret; 249 return ret;
242} 250}
243 251
244static int 252static int
245qla2x00_process_els(struct fc_bsg_job *bsg_job) 253qla2x00_process_els(struct bsg_job *bsg_job)
246{ 254{
255 struct fc_bsg_request *bsg_request = bsg_job->request;
247 struct fc_rport *rport; 256 struct fc_rport *rport;
248 fc_port_t *fcport = NULL; 257 fc_port_t *fcport = NULL;
249 struct Scsi_Host *host; 258 struct Scsi_Host *host;
@@ -255,15 +264,15 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
255 int rval = (DRIVER_ERROR << 16); 264 int rval = (DRIVER_ERROR << 16);
256 uint16_t nextlid = 0; 265 uint16_t nextlid = 0;
257 266
258 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { 267 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
259 rport = bsg_job->rport; 268 rport = fc_bsg_to_rport(bsg_job);
260 fcport = *(fc_port_t **) rport->dd_data; 269 fcport = *(fc_port_t **) rport->dd_data;
261 host = rport_to_shost(rport); 270 host = rport_to_shost(rport);
262 vha = shost_priv(host); 271 vha = shost_priv(host);
263 ha = vha->hw; 272 ha = vha->hw;
264 type = "FC_BSG_RPT_ELS"; 273 type = "FC_BSG_RPT_ELS";
265 } else { 274 } else {
266 host = bsg_job->shost; 275 host = fc_bsg_to_shost(bsg_job);
267 vha = shost_priv(host); 276 vha = shost_priv(host);
268 ha = vha->hw; 277 ha = vha->hw;
269 type = "FC_BSG_HST_ELS_NOLOGIN"; 278 type = "FC_BSG_HST_ELS_NOLOGIN";
@@ -296,7 +305,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
296 } 305 }
297 306
298 /* ELS request for rport */ 307 /* ELS request for rport */
299 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { 308 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
300 /* make sure the rport is logged in, 309 /* make sure the rport is logged in,
301 * if not perform fabric login 310 * if not perform fabric login
302 */ 311 */
@@ -322,11 +331,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
322 /* Initialize all required fields of fcport */ 331 /* Initialize all required fields of fcport */
323 fcport->vha = vha; 332 fcport->vha = vha;
324 fcport->d_id.b.al_pa = 333 fcport->d_id.b.al_pa =
325 bsg_job->request->rqst_data.h_els.port_id[0]; 334 bsg_request->rqst_data.h_els.port_id[0];
326 fcport->d_id.b.area = 335 fcport->d_id.b.area =
327 bsg_job->request->rqst_data.h_els.port_id[1]; 336 bsg_request->rqst_data.h_els.port_id[1];
328 fcport->d_id.b.domain = 337 fcport->d_id.b.domain =
329 bsg_job->request->rqst_data.h_els.port_id[2]; 338 bsg_request->rqst_data.h_els.port_id[2];
330 fcport->loop_id = 339 fcport->loop_id =
331 (fcport->d_id.b.al_pa == 0xFD) ? 340 (fcport->d_id.b.al_pa == 0xFD) ?
332 NPH_FABRIC_CONTROLLER : NPH_F_PORT; 341 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
@@ -366,11 +375,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
366 } 375 }
367 376
368 sp->type = 377 sp->type =
369 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? 378 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
370 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); 379 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
371 sp->name = 380 sp->name =
372 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? 381 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
373 "bsg_els_rpt" : "bsg_els_hst"); 382 "bsg_els_rpt" : "bsg_els_hst");
374 sp->u.bsg_job = bsg_job; 383 sp->u.bsg_job = bsg_job;
375 sp->free = qla2x00_bsg_sp_free; 384 sp->free = qla2x00_bsg_sp_free;
376 sp->done = qla2x00_bsg_job_done; 385 sp->done = qla2x00_bsg_job_done;
@@ -378,7 +387,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
378 ql_dbg(ql_dbg_user, vha, 0x700a, 387 ql_dbg(ql_dbg_user, vha, 0x700a,
379 "bsg rqst type: %s els type: %x - loop-id=%x " 388 "bsg rqst type: %s els type: %x - loop-id=%x "
380 "portid=%-2x%02x%02x.\n", type, 389 "portid=%-2x%02x%02x.\n", type,
381 bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id, 390 bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
382 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); 391 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
383 392
384 rval = qla2x00_start_sp(sp); 393 rval = qla2x00_start_sp(sp);
@@ -399,7 +408,7 @@ done_unmap_sg:
399 goto done_free_fcport; 408 goto done_free_fcport;
400 409
401done_free_fcport: 410done_free_fcport:
402 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) 411 if (bsg_request->msgcode == FC_BSG_RPT_ELS)
403 kfree(fcport); 412 kfree(fcport);
404done: 413done:
405 return rval; 414 return rval;
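
[editor's note] The same migration removes the bsg_job->shost and bsg_job->rport back-pointers; a generic struct bsg_job no longer carries FC-specific fields, so the driver reaches the Scsi_Host and fc_rport through the accessor helpers fc_bsg_to_shost() and fc_bsg_to_rport(), as in the hunk above. A toy mock of the accessor pattern — the dd_data slot and trivial body here are hypothetical; the real helpers derive the host/rport from the job's embedded device pointer:

    #include <stdio.h>

    struct Scsi_Host { int host_no; };
    struct bsg_job   { void *dd_data; };  /* hypothetical back-pointer slot */

    /* hedged mock of fc_bsg_to_shost(): the point is that callers go through
     * an accessor instead of a transport-specific struct field */
    static struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job)
    {
            return (struct Scsi_Host *)job->dd_data;
    }

    int main(void)
    {
            struct Scsi_Host sh = { 7 };
            struct bsg_job job = { &sh };

            printf("host%d\n", fc_bsg_to_shost(&job)->host_no);
            return 0;
    }
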
@@ -420,10 +429,11 @@ qla24xx_calc_ct_iocbs(uint16_t dsds)
420} 429}
421 430
422static int 431static int
423qla2x00_process_ct(struct fc_bsg_job *bsg_job) 432qla2x00_process_ct(struct bsg_job *bsg_job)
424{ 433{
425 srb_t *sp; 434 srb_t *sp;
426 struct Scsi_Host *host = bsg_job->shost; 435 struct fc_bsg_request *bsg_request = bsg_job->request;
436 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
427 scsi_qla_host_t *vha = shost_priv(host); 437 scsi_qla_host_t *vha = shost_priv(host);
428 struct qla_hw_data *ha = vha->hw; 438 struct qla_hw_data *ha = vha->hw;
429 int rval = (DRIVER_ERROR << 16); 439 int rval = (DRIVER_ERROR << 16);
@@ -469,7 +479,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
469 } 479 }
470 480
471 loop_id = 481 loop_id =
472 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000) 482 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
473 >> 24; 483 >> 24;
474 switch (loop_id) { 484 switch (loop_id) {
475 case 0xFC: 485 case 0xFC:
@@ -500,9 +510,9 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
500 510
501 /* Initialize all required fields of fcport */ 511 /* Initialize all required fields of fcport */
502 fcport->vha = vha; 512 fcport->vha = vha;
503 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0]; 513 fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
504 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1]; 514 fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
505 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2]; 515 fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
506 fcport->loop_id = loop_id; 516 fcport->loop_id = loop_id;
507 517
508 /* Alloc SRB structure */ 518 /* Alloc SRB structure */
@@ -524,7 +534,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
524 ql_dbg(ql_dbg_user, vha, 0x7016, 534 ql_dbg(ql_dbg_user, vha, 0x7016,
525 "bsg rqst type: %s else type: %x - " 535 "bsg rqst type: %s else type: %x - "
526 "loop-id=%x portid=%02x%02x%02x.\n", type, 536 "loop-id=%x portid=%02x%02x%02x.\n", type,
527 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16), 537 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
528 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 538 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
529 fcport->d_id.b.al_pa); 539 fcport->d_id.b.al_pa);
530 540
@@ -697,9 +707,11 @@ done_set_internal:
697} 707}
698 708
699static int 709static int
700qla2x00_process_loopback(struct fc_bsg_job *bsg_job) 710qla2x00_process_loopback(struct bsg_job *bsg_job)
701{ 711{
702 struct Scsi_Host *host = bsg_job->shost; 712 struct fc_bsg_request *bsg_request = bsg_job->request;
713 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
714 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
703 scsi_qla_host_t *vha = shost_priv(host); 715 scsi_qla_host_t *vha = shost_priv(host);
704 struct qla_hw_data *ha = vha->hw; 716 struct qla_hw_data *ha = vha->hw;
705 int rval; 717 int rval;
@@ -780,9 +792,9 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
780 elreq.rcv_dma = rsp_data_dma; 792 elreq.rcv_dma = rsp_data_dma;
781 elreq.transfer_size = req_data_len; 793 elreq.transfer_size = req_data_len;
782 794
783 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 795 elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
784 elreq.iteration_count = 796 elreq.iteration_count =
785 bsg_job->request->rqst_data.h_vendor.vendor_cmd[2]; 797 bsg_request->rqst_data.h_vendor.vendor_cmd[2];
786 798
787 if (atomic_read(&vha->loop_state) == LOOP_READY && 799 if (atomic_read(&vha->loop_state) == LOOP_READY &&
788 (ha->current_topology == ISP_CFG_F || 800 (ha->current_topology == ISP_CFG_F ||
@@ -896,12 +908,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
896 "Vendor request %s failed.\n", type); 908 "Vendor request %s failed.\n", type);
897 909
898 rval = 0; 910 rval = 0;
899 bsg_job->reply->result = (DID_ERROR << 16); 911 bsg_reply->result = (DID_ERROR << 16);
900 bsg_job->reply->reply_payload_rcv_len = 0; 912 bsg_reply->reply_payload_rcv_len = 0;
901 } else { 913 } else {
902 ql_dbg(ql_dbg_user, vha, 0x702d, 914 ql_dbg(ql_dbg_user, vha, 0x702d,
903 "Vendor request %s completed.\n", type); 915 "Vendor request %s completed.\n", type);
904 bsg_job->reply->result = (DID_OK << 16); 916 bsg_reply->result = (DID_OK << 16);
905 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 917 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
906 bsg_job->reply_payload.sg_cnt, rsp_data, 918 bsg_job->reply_payload.sg_cnt, rsp_data,
907 rsp_data_len); 919 rsp_data_len);
@@ -930,14 +942,17 @@ done_unmap_req_sg:
930 bsg_job->request_payload.sg_list, 942 bsg_job->request_payload.sg_list,
931 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 943 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
932 if (!rval) 944 if (!rval)
933 bsg_job->job_done(bsg_job); 945 bsg_job_done(bsg_job, bsg_reply->result,
946 bsg_reply->reply_payload_rcv_len);
934 return rval; 947 return rval;
935} 948}
936 949
937static int 950static int
938qla84xx_reset(struct fc_bsg_job *bsg_job) 951qla84xx_reset(struct bsg_job *bsg_job)
939{ 952{
940 struct Scsi_Host *host = bsg_job->shost; 953 struct fc_bsg_request *bsg_request = bsg_job->request;
954 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
955 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
941 scsi_qla_host_t *vha = shost_priv(host); 956 scsi_qla_host_t *vha = shost_priv(host);
942 struct qla_hw_data *ha = vha->hw; 957 struct qla_hw_data *ha = vha->hw;
943 int rval = 0; 958 int rval = 0;
@@ -948,7 +963,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
948 return -EINVAL; 963 return -EINVAL;
949 } 964 }
950 965
951 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 966 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
952 967
953 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); 968 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
954 969
@@ -960,17 +975,20 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
960 } else { 975 } else {
961 ql_dbg(ql_dbg_user, vha, 0x7031, 976 ql_dbg(ql_dbg_user, vha, 0x7031,
962 "Vendor request 84xx reset completed.\n"); 977 "Vendor request 84xx reset completed.\n");
963 bsg_job->reply->result = DID_OK; 978 bsg_reply->result = DID_OK;
964 bsg_job->job_done(bsg_job); 979 bsg_job_done(bsg_job, bsg_reply->result,
980 bsg_reply->reply_payload_rcv_len);
965 } 981 }
966 982
967 return rval; 983 return rval;
968} 984}
969 985
970static int 986static int
971qla84xx_updatefw(struct fc_bsg_job *bsg_job) 987qla84xx_updatefw(struct bsg_job *bsg_job)
972{ 988{
973 struct Scsi_Host *host = bsg_job->shost; 989 struct fc_bsg_request *bsg_request = bsg_job->request;
990 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
991 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
974 scsi_qla_host_t *vha = shost_priv(host); 992 scsi_qla_host_t *vha = shost_priv(host);
975 struct qla_hw_data *ha = vha->hw; 993 struct qla_hw_data *ha = vha->hw;
976 struct verify_chip_entry_84xx *mn = NULL; 994 struct verify_chip_entry_84xx *mn = NULL;
@@ -1027,7 +1045,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
1027 goto done_free_fw_buf; 1045 goto done_free_fw_buf;
1028 } 1046 }
1029 1047
1030 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1048 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1031 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2))); 1049 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
1032 1050
1033 memset(mn, 0, sizeof(struct access_chip_84xx)); 1051 memset(mn, 0, sizeof(struct access_chip_84xx));
@@ -1059,7 +1077,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
1059 "Vendor request 84xx updatefw completed.\n"); 1077 "Vendor request 84xx updatefw completed.\n");
1060 1078
1061 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1079 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1062 bsg_job->reply->result = DID_OK; 1080 bsg_reply->result = DID_OK;
1063 } 1081 }
1064 1082
1065 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 1083 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
@@ -1072,14 +1090,17 @@ done_unmap_sg:
1072 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1090 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1073 1091
1074 if (!rval) 1092 if (!rval)
1075 bsg_job->job_done(bsg_job); 1093 bsg_job_done(bsg_job, bsg_reply->result,
1094 bsg_reply->reply_payload_rcv_len);
1076 return rval; 1095 return rval;
1077} 1096}
1078 1097
1079static int 1098static int
1080qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) 1099qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
1081{ 1100{
1082 struct Scsi_Host *host = bsg_job->shost; 1101 struct fc_bsg_request *bsg_request = bsg_job->request;
1102 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1103 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1083 scsi_qla_host_t *vha = shost_priv(host); 1104 scsi_qla_host_t *vha = shost_priv(host);
1084 struct qla_hw_data *ha = vha->hw; 1105 struct qla_hw_data *ha = vha->hw;
1085 struct access_chip_84xx *mn = NULL; 1106 struct access_chip_84xx *mn = NULL;
@@ -1107,7 +1128,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1107 memset(mn, 0, sizeof(struct access_chip_84xx)); 1128 memset(mn, 0, sizeof(struct access_chip_84xx));
1108 mn->entry_type = ACCESS_CHIP_IOCB_TYPE; 1129 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1109 mn->entry_count = 1; 1130 mn->entry_count = 1;
1110 ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request); 1131 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
1111 switch (ql84_mgmt->mgmt.cmd) { 1132 switch (ql84_mgmt->mgmt.cmd) {
1112 case QLA84_MGMT_READ_MEM: 1133 case QLA84_MGMT_READ_MEM:
1113 case QLA84_MGMT_GET_INFO: 1134 case QLA84_MGMT_GET_INFO:
@@ -1239,11 +1260,11 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1239 "Vendor request 84xx mgmt completed.\n"); 1260 "Vendor request 84xx mgmt completed.\n");
1240 1261
1241 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1262 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1242 bsg_job->reply->result = DID_OK; 1263 bsg_reply->result = DID_OK;
1243 1264
1244 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) || 1265 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1245 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) { 1266 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1246 bsg_job->reply->reply_payload_rcv_len = 1267 bsg_reply->reply_payload_rcv_len =
1247 bsg_job->reply_payload.payload_len; 1268 bsg_job->reply_payload.payload_len;
1248 1269
1249 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1270 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
@@ -1267,14 +1288,17 @@ exit_mgmt:
1267 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 1288 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1268 1289
1269 if (!rval) 1290 if (!rval)
1270 bsg_job->job_done(bsg_job); 1291 bsg_job_done(bsg_job, bsg_reply->result,
1292 bsg_reply->reply_payload_rcv_len);
1271 return rval; 1293 return rval;
1272} 1294}
1273 1295
1274static int 1296static int
1275qla24xx_iidma(struct fc_bsg_job *bsg_job) 1297qla24xx_iidma(struct bsg_job *bsg_job)
1276{ 1298{
1277 struct Scsi_Host *host = bsg_job->shost; 1299 struct fc_bsg_request *bsg_request = bsg_job->request;
1300 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1301 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1278 scsi_qla_host_t *vha = shost_priv(host); 1302 scsi_qla_host_t *vha = shost_priv(host);
1279 int rval = 0; 1303 int rval = 0;
1280 struct qla_port_param *port_param = NULL; 1304 struct qla_port_param *port_param = NULL;
@@ -1288,7 +1312,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1288 return -EINVAL; 1312 return -EINVAL;
1289 } 1313 }
1290 1314
1291 port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request); 1315 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1292 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { 1316 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1293 ql_log(ql_log_warn, vha, 0x7048, 1317 ql_log(ql_log_warn, vha, 0x7048,
1294 "Invalid destination type.\n"); 1318 "Invalid destination type.\n");
@@ -1343,24 +1367,26 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1343 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 1367 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1344 sizeof(struct qla_port_param); 1368 sizeof(struct qla_port_param);
1345 1369
1346 rsp_ptr = ((uint8_t *)bsg_job->reply) + 1370 rsp_ptr = ((uint8_t *)bsg_reply) +
1347 sizeof(struct fc_bsg_reply); 1371 sizeof(struct fc_bsg_reply);
1348 1372
1349 memcpy(rsp_ptr, port_param, 1373 memcpy(rsp_ptr, port_param,
1350 sizeof(struct qla_port_param)); 1374 sizeof(struct qla_port_param));
1351 } 1375 }
1352 1376
1353 bsg_job->reply->result = DID_OK; 1377 bsg_reply->result = DID_OK;
1354 bsg_job->job_done(bsg_job); 1378 bsg_job_done(bsg_job, bsg_reply->result,
1379 bsg_reply->reply_payload_rcv_len);
1355 } 1380 }
1356 1381
1357 return rval; 1382 return rval;
1358} 1383}
1359 1384
1360static int 1385static int
1361qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha, 1386qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
1362 uint8_t is_update) 1387 uint8_t is_update)
1363{ 1388{
1389 struct fc_bsg_request *bsg_request = bsg_job->request;
1364 uint32_t start = 0; 1390 uint32_t start = 0;
1365 int valid = 0; 1391 int valid = 0;
1366 struct qla_hw_data *ha = vha->hw; 1392 struct qla_hw_data *ha = vha->hw;
@@ -1368,7 +1394,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1368 if (unlikely(pci_channel_offline(ha->pdev))) 1394 if (unlikely(pci_channel_offline(ha->pdev)))
1369 return -EINVAL; 1395 return -EINVAL;
1370 1396
1371 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1397 start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1372 if (start > ha->optrom_size) { 1398 if (start > ha->optrom_size) {
1373 ql_log(ql_log_warn, vha, 0x7055, 1399 ql_log(ql_log_warn, vha, 0x7055,
1374 "start %d > optrom_size %d.\n", start, ha->optrom_size); 1400 "start %d > optrom_size %d.\n", start, ha->optrom_size);
@@ -1427,9 +1453,10 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1427} 1453}
1428 1454
1429static int 1455static int
1430qla2x00_read_optrom(struct fc_bsg_job *bsg_job) 1456qla2x00_read_optrom(struct bsg_job *bsg_job)
1431{ 1457{
1432 struct Scsi_Host *host = bsg_job->shost; 1458 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1459 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1433 scsi_qla_host_t *vha = shost_priv(host); 1460 scsi_qla_host_t *vha = shost_priv(host);
1434 struct qla_hw_data *ha = vha->hw; 1461 struct qla_hw_data *ha = vha->hw;
1435 int rval = 0; 1462 int rval = 0;
@@ -1451,20 +1478,22 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1451 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer, 1478 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1452 ha->optrom_region_size); 1479 ha->optrom_region_size);
1453 1480
1454 bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size; 1481 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1455 bsg_job->reply->result = DID_OK; 1482 bsg_reply->result = DID_OK;
1456 vfree(ha->optrom_buffer); 1483 vfree(ha->optrom_buffer);
1457 ha->optrom_buffer = NULL; 1484 ha->optrom_buffer = NULL;
1458 ha->optrom_state = QLA_SWAITING; 1485 ha->optrom_state = QLA_SWAITING;
1459 mutex_unlock(&ha->optrom_mutex); 1486 mutex_unlock(&ha->optrom_mutex);
1460 bsg_job->job_done(bsg_job); 1487 bsg_job_done(bsg_job, bsg_reply->result,
1488 bsg_reply->reply_payload_rcv_len);
1461 return rval; 1489 return rval;
1462} 1490}
1463 1491
1464static int 1492static int
1465qla2x00_update_optrom(struct fc_bsg_job *bsg_job) 1493qla2x00_update_optrom(struct bsg_job *bsg_job)
1466{ 1494{
1467 struct Scsi_Host *host = bsg_job->shost; 1495 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1496 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1468 scsi_qla_host_t *vha = shost_priv(host); 1497 scsi_qla_host_t *vha = shost_priv(host);
1469 struct qla_hw_data *ha = vha->hw; 1498 struct qla_hw_data *ha = vha->hw;
1470 int rval = 0; 1499 int rval = 0;
@@ -1486,19 +1515,21 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1486 ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 1515 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1487 ha->optrom_region_start, ha->optrom_region_size); 1516 ha->optrom_region_start, ha->optrom_region_size);
1488 1517
1489 bsg_job->reply->result = DID_OK; 1518 bsg_reply->result = DID_OK;
1490 vfree(ha->optrom_buffer); 1519 vfree(ha->optrom_buffer);
1491 ha->optrom_buffer = NULL; 1520 ha->optrom_buffer = NULL;
1492 ha->optrom_state = QLA_SWAITING; 1521 ha->optrom_state = QLA_SWAITING;
1493 mutex_unlock(&ha->optrom_mutex); 1522 mutex_unlock(&ha->optrom_mutex);
1494 bsg_job->job_done(bsg_job); 1523 bsg_job_done(bsg_job, bsg_reply->result,
1524 bsg_reply->reply_payload_rcv_len);
1495 return rval; 1525 return rval;
1496} 1526}
1497 1527
1498static int 1528static int
1499qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job) 1529qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1500{ 1530{
1501 struct Scsi_Host *host = bsg_job->shost; 1531 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1532 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1502 scsi_qla_host_t *vha = shost_priv(host); 1533 scsi_qla_host_t *vha = shost_priv(host);
1503 struct qla_hw_data *ha = vha->hw; 1534 struct qla_hw_data *ha = vha->hw;
1504 int rval = 0; 1535 int rval = 0;
@@ -1509,7 +1540,7 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1509 dma_addr_t sfp_dma; 1540 dma_addr_t sfp_dma;
1510 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1541 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1511 if (!sfp) { 1542 if (!sfp) {
1512 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1543 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1513 EXT_STATUS_NO_MEMORY; 1544 EXT_STATUS_NO_MEMORY;
1514 goto done; 1545 goto done;
1515 } 1546 }
@@ -1525,30 +1556,32 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1525 image->field_address.device, image->field_address.offset, 1556 image->field_address.device, image->field_address.offset,
1526 sizeof(image->field_info), image->field_address.option); 1557 sizeof(image->field_info), image->field_address.option);
1527 if (rval) { 1558 if (rval) {
1528 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1559 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1529 EXT_STATUS_MAILBOX; 1560 EXT_STATUS_MAILBOX;
1530 goto dealloc; 1561 goto dealloc;
1531 } 1562 }
1532 image++; 1563 image++;
1533 } 1564 }
1534 1565
1535 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1566 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1536 1567
1537dealloc: 1568dealloc:
1538 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1569 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1539 1570
1540done: 1571done:
1541 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1572 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1542 bsg_job->reply->result = DID_OK << 16; 1573 bsg_reply->result = DID_OK << 16;
1543 bsg_job->job_done(bsg_job); 1574 bsg_job_done(bsg_job, bsg_reply->result,
1575 bsg_reply->reply_payload_rcv_len);
1544 1576
1545 return 0; 1577 return 0;
1546} 1578}
1547 1579
1548static int 1580static int
1549qla2x00_read_fru_status(struct fc_bsg_job *bsg_job) 1581qla2x00_read_fru_status(struct bsg_job *bsg_job)
1550{ 1582{
1551 struct Scsi_Host *host = bsg_job->shost; 1583 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1584 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1552 scsi_qla_host_t *vha = shost_priv(host); 1585 scsi_qla_host_t *vha = shost_priv(host);
1553 struct qla_hw_data *ha = vha->hw; 1586 struct qla_hw_data *ha = vha->hw;
1554 int rval = 0; 1587 int rval = 0;
@@ -1557,7 +1590,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1557 dma_addr_t sfp_dma; 1590 dma_addr_t sfp_dma;
1558 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1591 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1559 if (!sfp) { 1592 if (!sfp) {
1560 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1593 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1561 EXT_STATUS_NO_MEMORY; 1594 EXT_STATUS_NO_MEMORY;
1562 goto done; 1595 goto done;
1563 } 1596 }
@@ -1571,7 +1604,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1571 sr->status_reg = *sfp; 1604 sr->status_reg = *sfp;
1572 1605
1573 if (rval) { 1606 if (rval) {
1574 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1607 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1575 EXT_STATUS_MAILBOX; 1608 EXT_STATUS_MAILBOX;
1576 goto dealloc; 1609 goto dealloc;
1577 } 1610 }
@@ -1579,24 +1612,26 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1579 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1612 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1580 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr)); 1613 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1581 1614
1582 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1615 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1583 1616
1584dealloc: 1617dealloc:
1585 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1618 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1586 1619
1587done: 1620done:
1588 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1621 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1589 bsg_job->reply->reply_payload_rcv_len = sizeof(*sr); 1622 bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1590 bsg_job->reply->result = DID_OK << 16; 1623 bsg_reply->result = DID_OK << 16;
1591 bsg_job->job_done(bsg_job); 1624 bsg_job_done(bsg_job, bsg_reply->result,
1625 bsg_reply->reply_payload_rcv_len);
1592 1626
1593 return 0; 1627 return 0;
1594} 1628}
1595 1629
1596static int 1630static int
1597qla2x00_write_fru_status(struct fc_bsg_job *bsg_job) 1631qla2x00_write_fru_status(struct bsg_job *bsg_job)
1598{ 1632{
1599 struct Scsi_Host *host = bsg_job->shost; 1633 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1634 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1600 scsi_qla_host_t *vha = shost_priv(host); 1635 scsi_qla_host_t *vha = shost_priv(host);
1601 struct qla_hw_data *ha = vha->hw; 1636 struct qla_hw_data *ha = vha->hw;
1602 int rval = 0; 1637 int rval = 0;
@@ -1605,7 +1640,7 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1605 dma_addr_t sfp_dma; 1640 dma_addr_t sfp_dma;
1606 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1641 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1607 if (!sfp) { 1642 if (!sfp) {
1608 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1643 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1609 EXT_STATUS_NO_MEMORY; 1644 EXT_STATUS_NO_MEMORY;
1610 goto done; 1645 goto done;
1611 } 1646 }
@@ -1619,28 +1654,30 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1619 sizeof(sr->status_reg), sr->field_address.option); 1654 sizeof(sr->status_reg), sr->field_address.option);
1620 1655
1621 if (rval) { 1656 if (rval) {
1622 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1657 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1623 EXT_STATUS_MAILBOX; 1658 EXT_STATUS_MAILBOX;
1624 goto dealloc; 1659 goto dealloc;
1625 } 1660 }
1626 1661
1627 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1662 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1628 1663
1629dealloc: 1664dealloc:
1630 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1665 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1631 1666
1632done: 1667done:
1633 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1668 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1634 bsg_job->reply->result = DID_OK << 16; 1669 bsg_reply->result = DID_OK << 16;
1635 bsg_job->job_done(bsg_job); 1670 bsg_job_done(bsg_job, bsg_reply->result,
1671 bsg_reply->reply_payload_rcv_len);
1636 1672
1637 return 0; 1673 return 0;
1638} 1674}
1639 1675
1640static int 1676static int
1641qla2x00_write_i2c(struct fc_bsg_job *bsg_job) 1677qla2x00_write_i2c(struct bsg_job *bsg_job)
1642{ 1678{
1643 struct Scsi_Host *host = bsg_job->shost; 1679 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1680 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1644 scsi_qla_host_t *vha = shost_priv(host); 1681 scsi_qla_host_t *vha = shost_priv(host);
1645 struct qla_hw_data *ha = vha->hw; 1682 struct qla_hw_data *ha = vha->hw;
1646 int rval = 0; 1683 int rval = 0;
@@ -1649,7 +1686,7 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
1649 dma_addr_t sfp_dma; 1686 dma_addr_t sfp_dma;
1650 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1687 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1651 if (!sfp) { 1688 if (!sfp) {
1652 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1689 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1653 EXT_STATUS_NO_MEMORY; 1690 EXT_STATUS_NO_MEMORY;
1654 goto done; 1691 goto done;
1655 } 1692 }
@@ -1662,28 +1699,30 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
1662 i2c->device, i2c->offset, i2c->length, i2c->option); 1699 i2c->device, i2c->offset, i2c->length, i2c->option);
1663 1700
1664 if (rval) { 1701 if (rval) {
1665 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1702 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1666 EXT_STATUS_MAILBOX; 1703 EXT_STATUS_MAILBOX;
1667 goto dealloc; 1704 goto dealloc;
1668 } 1705 }
1669 1706
1670 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1707 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1671 1708
1672dealloc: 1709dealloc:
1673 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1710 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1674 1711
1675done: 1712done:
1676 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1713 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1677 bsg_job->reply->result = DID_OK << 16; 1714 bsg_reply->result = DID_OK << 16;
1678 bsg_job->job_done(bsg_job); 1715 bsg_job_done(bsg_job, bsg_reply->result,
1716 bsg_reply->reply_payload_rcv_len);
1679 1717
1680 return 0; 1718 return 0;
1681} 1719}
1682 1720
1683static int 1721static int
1684qla2x00_read_i2c(struct fc_bsg_job *bsg_job) 1722qla2x00_read_i2c(struct bsg_job *bsg_job)
1685{ 1723{
1686 struct Scsi_Host *host = bsg_job->shost; 1724 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1725 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1687 scsi_qla_host_t *vha = shost_priv(host); 1726 scsi_qla_host_t *vha = shost_priv(host);
1688 struct qla_hw_data *ha = vha->hw; 1727 struct qla_hw_data *ha = vha->hw;
1689 int rval = 0; 1728 int rval = 0;
@@ -1692,7 +1731,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
1692 dma_addr_t sfp_dma; 1731 dma_addr_t sfp_dma;
1693 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1732 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1694 if (!sfp) { 1733 if (!sfp) {
1695 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1734 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1696 EXT_STATUS_NO_MEMORY; 1735 EXT_STATUS_NO_MEMORY;
1697 goto done; 1736 goto done;
1698 } 1737 }
@@ -1704,7 +1743,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
1704 i2c->device, i2c->offset, i2c->length, i2c->option); 1743 i2c->device, i2c->offset, i2c->length, i2c->option);
1705 1744
1706 if (rval) { 1745 if (rval) {
1707 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1746 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1708 EXT_STATUS_MAILBOX; 1747 EXT_STATUS_MAILBOX;
1709 goto dealloc; 1748 goto dealloc;
1710 } 1749 }
@@ -1713,24 +1752,26 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
1713 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1752 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1714 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c)); 1753 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1715 1754
1716 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1755 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1717 1756
1718dealloc: 1757dealloc:
1719 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1758 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1720 1759
1721done: 1760done:
1722 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1761 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1723 bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c); 1762 bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1724 bsg_job->reply->result = DID_OK << 16; 1763 bsg_reply->result = DID_OK << 16;
1725 bsg_job->job_done(bsg_job); 1764 bsg_job_done(bsg_job, bsg_reply->result,
1765 bsg_reply->reply_payload_rcv_len);
1726 1766
1727 return 0; 1767 return 0;
1728} 1768}
1729 1769
1730static int 1770static int
1731qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job) 1771qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1732{ 1772{
1733 struct Scsi_Host *host = bsg_job->shost; 1773 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1774 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1734 scsi_qla_host_t *vha = shost_priv(host); 1775 scsi_qla_host_t *vha = shost_priv(host);
1735 struct qla_hw_data *ha = vha->hw; 1776 struct qla_hw_data *ha = vha->hw;
1736 uint32_t rval = EXT_STATUS_OK; 1777 uint32_t rval = EXT_STATUS_OK;
@@ -1895,19 +1936,21 @@ done:
1895 /* Return an error vendor specific response 1936 /* Return an error vendor specific response
1896 * and complete the bsg request 1937 * and complete the bsg request
1897 */ 1938 */
1898 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1939 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1899 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1940 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1900 bsg_job->reply->reply_payload_rcv_len = 0; 1941 bsg_reply->reply_payload_rcv_len = 0;
1901 bsg_job->reply->result = (DID_OK) << 16; 1942 bsg_reply->result = (DID_OK) << 16;
1902 bsg_job->job_done(bsg_job); 1943 bsg_job_done(bsg_job, bsg_reply->result,
1944 bsg_reply->reply_payload_rcv_len);
1903 /* Always return success, vendor rsp carries correct status */ 1945 /* Always return success, vendor rsp carries correct status */
1904 return 0; 1946 return 0;
1905} 1947}
1906 1948
1907static int 1949static int
1908qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job) 1950qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1909{ 1951{
1910 struct Scsi_Host *host = bsg_job->shost; 1952 struct fc_bsg_request *bsg_request = bsg_job->request;
1953 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1911 scsi_qla_host_t *vha = shost_priv(host); 1954 scsi_qla_host_t *vha = shost_priv(host);
1912 struct qla_hw_data *ha = vha->hw; 1955 struct qla_hw_data *ha = vha->hw;
1913 int rval = (DRIVER_ERROR << 16); 1956 int rval = (DRIVER_ERROR << 16);
@@ -1919,7 +1962,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
1919 1962
1920 /* Copy the IOCB specific information */ 1963 /* Copy the IOCB specific information */
1921 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 1964 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1922 &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1965 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1923 1966
1924 /* Dump the vendor information */ 1967 /* Dump the vendor information */
1925 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, 1968 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
@@ -2027,9 +2070,10 @@ done:
2027} 2070}
2028 2071
2029static int 2072static int
2030qla26xx_serdes_op(struct fc_bsg_job *bsg_job) 2073qla26xx_serdes_op(struct bsg_job *bsg_job)
2031{ 2074{
2032 struct Scsi_Host *host = bsg_job->shost; 2075 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2076 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2033 scsi_qla_host_t *vha = shost_priv(host); 2077 scsi_qla_host_t *vha = shost_priv(host);
2034 int rval = 0; 2078 int rval = 0;
2035 struct qla_serdes_reg sr; 2079 struct qla_serdes_reg sr;
@@ -2042,13 +2086,13 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
2042 switch (sr.cmd) { 2086 switch (sr.cmd) {
2043 case INT_SC_SERDES_WRITE_REG: 2087 case INT_SC_SERDES_WRITE_REG:
2044 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val); 2088 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2045 bsg_job->reply->reply_payload_rcv_len = 0; 2089 bsg_reply->reply_payload_rcv_len = 0;
2046 break; 2090 break;
2047 case INT_SC_SERDES_READ_REG: 2091 case INT_SC_SERDES_READ_REG:
2048 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val); 2092 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2049 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2093 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2050 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); 2094 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2051 bsg_job->reply->reply_payload_rcv_len = sizeof(sr); 2095 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2052 break; 2096 break;
2053 default: 2097 default:
2054 ql_dbg(ql_dbg_user, vha, 0x708c, 2098 ql_dbg(ql_dbg_user, vha, 0x708c,
@@ -2057,19 +2101,21 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
2057 break; 2101 break;
2058 } 2102 }
2059 2103
2060 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 2104 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2061 rval ? EXT_STATUS_MAILBOX : 0; 2105 rval ? EXT_STATUS_MAILBOX : 0;
2062 2106
2063 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2107 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2064 bsg_job->reply->result = DID_OK << 16; 2108 bsg_reply->result = DID_OK << 16;
2065 bsg_job->job_done(bsg_job); 2109 bsg_job_done(bsg_job, bsg_reply->result,
2110 bsg_reply->reply_payload_rcv_len);
2066 return 0; 2111 return 0;
2067} 2112}
2068 2113
2069static int 2114static int
2070qla8044_serdes_op(struct fc_bsg_job *bsg_job) 2115qla8044_serdes_op(struct bsg_job *bsg_job)
2071{ 2116{
2072 struct Scsi_Host *host = bsg_job->shost; 2117 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2118 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2073 scsi_qla_host_t *vha = shost_priv(host); 2119 scsi_qla_host_t *vha = shost_priv(host);
2074 int rval = 0; 2120 int rval = 0;
2075 struct qla_serdes_reg_ex sr; 2121 struct qla_serdes_reg_ex sr;
@@ -2082,13 +2128,13 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
2082 switch (sr.cmd) { 2128 switch (sr.cmd) {
2083 case INT_SC_SERDES_WRITE_REG: 2129 case INT_SC_SERDES_WRITE_REG:
2084 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); 2130 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2085 bsg_job->reply->reply_payload_rcv_len = 0; 2131 bsg_reply->reply_payload_rcv_len = 0;
2086 break; 2132 break;
2087 case INT_SC_SERDES_READ_REG: 2133 case INT_SC_SERDES_READ_REG:
2088 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val); 2134 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2089 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2135 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2090 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); 2136 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2091 bsg_job->reply->reply_payload_rcv_len = sizeof(sr); 2137 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2092 break; 2138 break;
2093 default: 2139 default:
2094 ql_dbg(ql_dbg_user, vha, 0x70cf, 2140 ql_dbg(ql_dbg_user, vha, 0x70cf,
@@ -2097,19 +2143,21 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
2097 break; 2143 break;
2098 } 2144 }
2099 2145
2100 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 2146 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2101 rval ? EXT_STATUS_MAILBOX : 0; 2147 rval ? EXT_STATUS_MAILBOX : 0;
2102 2148
2103 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2149 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2104 bsg_job->reply->result = DID_OK << 16; 2150 bsg_reply->result = DID_OK << 16;
2105 bsg_job->job_done(bsg_job); 2151 bsg_job_done(bsg_job, bsg_reply->result,
2152 bsg_reply->reply_payload_rcv_len);
2106 return 0; 2153 return 0;
2107} 2154}
2108 2155
2109static int 2156static int
2110qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job) 2157qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2111{ 2158{
2112 struct Scsi_Host *host = bsg_job->shost; 2159 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2160 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2113 scsi_qla_host_t *vha = shost_priv(host); 2161 scsi_qla_host_t *vha = shost_priv(host);
2114 struct qla_hw_data *ha = vha->hw; 2162 struct qla_hw_data *ha = vha->hw;
2115 struct qla_flash_update_caps cap; 2163 struct qla_flash_update_caps cap;
@@ -2125,21 +2173,23 @@ qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
2125 2173
2126 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2174 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2127 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap)); 2175 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2128 bsg_job->reply->reply_payload_rcv_len = sizeof(cap); 2176 bsg_reply->reply_payload_rcv_len = sizeof(cap);
2129 2177
2130 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 2178 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2131 EXT_STATUS_OK; 2179 EXT_STATUS_OK;
2132 2180
2133 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2181 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2134 bsg_job->reply->result = DID_OK << 16; 2182 bsg_reply->result = DID_OK << 16;
2135 bsg_job->job_done(bsg_job); 2183 bsg_job_done(bsg_job, bsg_reply->result,
2184 bsg_reply->reply_payload_rcv_len);
2136 return 0; 2185 return 0;
2137} 2186}
2138 2187
2139static int 2188static int
2140qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job) 2189qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2141{ 2190{
2142 struct Scsi_Host *host = bsg_job->shost; 2191 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2192 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2143 scsi_qla_host_t *vha = shost_priv(host); 2193 scsi_qla_host_t *vha = shost_priv(host);
2144 struct qla_hw_data *ha = vha->hw; 2194 struct qla_hw_data *ha = vha->hw;
2145 uint64_t online_fw_attr = 0; 2195 uint64_t online_fw_attr = 0;
@@ -2158,32 +2208,34 @@ qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
2158 (uint64_t)ha->fw_attributes; 2208 (uint64_t)ha->fw_attributes;
2159 2209
2160 if (online_fw_attr != cap.capabilities) { 2210 if (online_fw_attr != cap.capabilities) {
2161 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 2211 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2162 EXT_STATUS_INVALID_PARAM; 2212 EXT_STATUS_INVALID_PARAM;
2163 return -EINVAL; 2213 return -EINVAL;
2164 } 2214 }
2165 2215
2166 if (cap.outage_duration < MAX_LOOP_TIMEOUT) { 2216 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2167 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 2217 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2168 EXT_STATUS_INVALID_PARAM; 2218 EXT_STATUS_INVALID_PARAM;
2169 return -EINVAL; 2219 return -EINVAL;
2170 } 2220 }
2171 2221
2172 bsg_job->reply->reply_payload_rcv_len = 0; 2222 bsg_reply->reply_payload_rcv_len = 0;
2173 2223
2174 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 2224 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2175 EXT_STATUS_OK; 2225 EXT_STATUS_OK;
2176 2226
2177 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2227 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2178 bsg_job->reply->result = DID_OK << 16; 2228 bsg_reply->result = DID_OK << 16;
2179 bsg_job->job_done(bsg_job); 2229 bsg_job_done(bsg_job, bsg_reply->result,
2230 bsg_reply->reply_payload_rcv_len);
2180 return 0; 2231 return 0;
2181} 2232}
2182 2233
2183static int 2234static int
2184qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job) 2235qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2185{ 2236{
2186 struct Scsi_Host *host = bsg_job->shost; 2237 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2238 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2187 scsi_qla_host_t *vha = shost_priv(host); 2239 scsi_qla_host_t *vha = shost_priv(host);
2188 struct qla_hw_data *ha = vha->hw; 2240 struct qla_hw_data *ha = vha->hw;
2189 struct qla_bbcr_data bbcr; 2241 struct qla_bbcr_data bbcr;
@@ -2227,27 +2279,30 @@ qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
2227done: 2279done:
2228 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2280 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2229 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr)); 2281 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2230 bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr); 2282 bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2231 2283
2232 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 2284 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2233 2285
2234 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2286 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2235 bsg_job->reply->result = DID_OK << 16; 2287 bsg_reply->result = DID_OK << 16;
2236 bsg_job->job_done(bsg_job); 2288 bsg_job_done(bsg_job, bsg_reply->result,
2289 bsg_reply->reply_payload_rcv_len);
2237 return 0; 2290 return 0;
2238} 2291}
2239 2292
2240static int 2293static int
2241qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job) 2294qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2242{ 2295{
2243 struct Scsi_Host *host = bsg_job->shost; 2296 struct fc_bsg_request *bsg_request = bsg_job->request;
2297 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2298 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2244 scsi_qla_host_t *vha = shost_priv(host); 2299 scsi_qla_host_t *vha = shost_priv(host);
2245 struct qla_hw_data *ha = vha->hw; 2300 struct qla_hw_data *ha = vha->hw;
2246 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2301 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2247 struct link_statistics *stats = NULL; 2302 struct link_statistics *stats = NULL;
2248 dma_addr_t stats_dma; 2303 dma_addr_t stats_dma;
2249 int rval; 2304 int rval;
2250 uint32_t *cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd; 2305 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2251 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0; 2306 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2252 2307
2253 if (test_bit(UNLOADING, &vha->dpc_flags)) 2308 if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -2281,13 +2336,14 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
2281 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats)); 2336 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2282 } 2337 }
2283 2338
2284 bsg_job->reply->reply_payload_rcv_len = sizeof(*stats); 2339 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2285 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 2340 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2286 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK; 2341 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2287 2342
2288 bsg_job->reply_len = sizeof(*bsg_job->reply); 2343 bsg_job->reply_len = sizeof(*bsg_reply);
2289 bsg_job->reply->result = DID_OK << 16; 2344 bsg_reply->result = DID_OK << 16;
2290 bsg_job->job_done(bsg_job); 2345 bsg_job_done(bsg_job, bsg_reply->result,
2346 bsg_reply->reply_payload_rcv_len);
2291 2347
2292 dma_free_coherent(&ha->pdev->dev, sizeof(*stats), 2348 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2293 stats, stats_dma); 2349 stats, stats_dma);
@@ -2296,9 +2352,10 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
2296} 2352}
2297 2353
2298static int 2354static int
2299qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job) 2355qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2300{ 2356{
2301 struct Scsi_Host *host = bsg_job->shost; 2357 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2358 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2302 scsi_qla_host_t *vha = shost_priv(host); 2359 scsi_qla_host_t *vha = shost_priv(host);
2303 int rval; 2360 int rval;
2304 struct qla_dport_diag *dd; 2361 struct qla_dport_diag *dd;
@@ -2323,13 +2380,14 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
2323 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); 2380 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2324 } 2381 }
2325 2382
2326 bsg_job->reply->reply_payload_rcv_len = sizeof(*dd); 2383 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2327 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 2384 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2328 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK; 2385 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2329 2386
2330 bsg_job->reply_len = sizeof(*bsg_job->reply); 2387 bsg_job->reply_len = sizeof(*bsg_reply);
2331 bsg_job->reply->result = DID_OK << 16; 2388 bsg_reply->result = DID_OK << 16;
2332 bsg_job->job_done(bsg_job); 2389 bsg_job_done(bsg_job, bsg_reply->result,
2390 bsg_reply->reply_payload_rcv_len);
2333 2391
2334 kfree(dd); 2392 kfree(dd);
2335 2393
@@ -2337,9 +2395,11 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
2337} 2395}
2338 2396
2339static int 2397static int
2340qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) 2398qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
2341{ 2399{
2342 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { 2400 struct fc_bsg_request *bsg_request = bsg_job->request;
2401
2402 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2343 case QL_VND_LOOPBACK: 2403 case QL_VND_LOOPBACK:
2344 return qla2x00_process_loopback(bsg_job); 2404 return qla2x00_process_loopback(bsg_job);
2345 2405
@@ -2413,36 +2473,38 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2413} 2473}
2414 2474
2415int 2475int
2416qla24xx_bsg_request(struct fc_bsg_job *bsg_job) 2476qla24xx_bsg_request(struct bsg_job *bsg_job)
2417{ 2477{
2478 struct fc_bsg_request *bsg_request = bsg_job->request;
2479 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2418 int ret = -EINVAL; 2480 int ret = -EINVAL;
2419 struct fc_rport *rport; 2481 struct fc_rport *rport;
2420 struct Scsi_Host *host; 2482 struct Scsi_Host *host;
2421 scsi_qla_host_t *vha; 2483 scsi_qla_host_t *vha;
2422 2484
2423 /* In case no data transferred. */ 2485 /* In case no data transferred. */
2424 bsg_job->reply->reply_payload_rcv_len = 0; 2486 bsg_reply->reply_payload_rcv_len = 0;
2425 2487
2426 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { 2488 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2427 rport = bsg_job->rport; 2489 rport = fc_bsg_to_rport(bsg_job);
2428 host = rport_to_shost(rport); 2490 host = rport_to_shost(rport);
2429 vha = shost_priv(host); 2491 vha = shost_priv(host);
2430 } else { 2492 } else {
2431 host = bsg_job->shost; 2493 host = fc_bsg_to_shost(bsg_job);
2432 vha = shost_priv(host); 2494 vha = shost_priv(host);
2433 } 2495 }
2434 2496
2435 if (qla2x00_reset_active(vha)) { 2497 if (qla2x00_reset_active(vha)) {
2436 ql_dbg(ql_dbg_user, vha, 0x709f, 2498 ql_dbg(ql_dbg_user, vha, 0x709f,
2437 "BSG: ISP abort active/needed -- cmd=%d.\n", 2499 "BSG: ISP abort active/needed -- cmd=%d.\n",
2438 bsg_job->request->msgcode); 2500 bsg_request->msgcode);
2439 return -EBUSY; 2501 return -EBUSY;
2440 } 2502 }
2441 2503
2442 ql_dbg(ql_dbg_user, vha, 0x7000, 2504 ql_dbg(ql_dbg_user, vha, 0x7000,
2443 "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode); 2505 "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2444 2506
2445 switch (bsg_job->request->msgcode) { 2507 switch (bsg_request->msgcode) {
2446 case FC_BSG_RPT_ELS: 2508 case FC_BSG_RPT_ELS:
2447 case FC_BSG_HST_ELS_NOLOGIN: 2509 case FC_BSG_HST_ELS_NOLOGIN:
2448 ret = qla2x00_process_els(bsg_job); 2510 ret = qla2x00_process_els(bsg_job);
@@ -2464,9 +2526,10 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2464} 2526}
2465 2527
2466int 2528int
2467qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) 2529qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2468{ 2530{
2469 scsi_qla_host_t *vha = shost_priv(bsg_job->shost); 2531 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2532 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2470 struct qla_hw_data *ha = vha->hw; 2533 struct qla_hw_data *ha = vha->hw;
2471 srb_t *sp; 2534 srb_t *sp;
2472 int cnt, que; 2535 int cnt, que;
@@ -2494,13 +2557,13 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2494 "mbx abort_command " 2557 "mbx abort_command "
2495 "failed.\n"); 2558 "failed.\n");
2496 bsg_job->req->errors = 2559 bsg_job->req->errors =
2497 bsg_job->reply->result = -EIO; 2560 bsg_reply->result = -EIO;
2498 } else { 2561 } else {
2499 ql_dbg(ql_dbg_user, vha, 0x708a, 2562 ql_dbg(ql_dbg_user, vha, 0x708a,
2500 "mbx abort_command " 2563 "mbx abort_command "
2501 "success.\n"); 2564 "success.\n");
2502 bsg_job->req->errors = 2565 bsg_job->req->errors =
2503 bsg_job->reply->result = 0; 2566 bsg_reply->result = 0;
2504 } 2567 }
2505 spin_lock_irqsave(&ha->hardware_lock, flags); 2568 spin_lock_irqsave(&ha->hardware_lock, flags);
2506 goto done; 2569 goto done;
@@ -2510,7 +2573,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2510 } 2573 }
2511 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2574 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2512 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); 2575 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2513 bsg_job->req->errors = bsg_job->reply->result = -ENXIO; 2576 bsg_job->req->errors = bsg_reply->result = -ENXIO;
2514 return 0; 2577 return 0;
2515 2578
2516done: 2579done:
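
[annotation] The qla_bsg.c hunks above all apply one mechanical recipe: the FC-transport-private struct fc_bsg_job (which carried its own shost/rport pointers and a job_done callback) becomes the generic struct bsg_job from bsg-lib, the request/reply pointers are cached in locals up front, the host is reached through fc_bsg_to_shost()/fc_bsg_to_rport(), and completion goes through bsg_job_done(). A minimal sketch of the resulting handler shape, assuming a hypothetical vendor command handler (my_vendor_cmd is not a name from the driver):

    #include <linux/bsg-lib.h>
    #include <scsi/scsi.h>
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    /* Hypothetical handler showing the post-conversion pattern. */
    static int my_vendor_cmd(struct bsg_job *bsg_job)
    {
            struct fc_bsg_request *bsg_request = bsg_job->request;
            struct fc_bsg_reply *bsg_reply = bsg_job->reply;
            struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);

            shost_printk(KERN_INFO, host, "vendor cmd 0x%x\n",
                         bsg_request->rqst_data.h_vendor.vendor_cmd[0]);

            bsg_reply->reply_payload_rcv_len = 0;
            bsg_job->reply_len = sizeof(struct fc_bsg_reply);
            bsg_reply->result = DID_OK << 16;

            /* bsg-lib completes the block request; no job_done callback. */
            bsg_job_done(bsg_job, bsg_reply->result,
                         bsg_reply->reply_payload_rcv_len);
            return 0;
    }
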
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 73b12e41d992..5236e3f2a06a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -403,7 +403,7 @@ typedef struct srb {
403 int iocbs; 403 int iocbs;
404 union { 404 union {
405 struct srb_iocb iocb_cmd; 405 struct srb_iocb iocb_cmd;
406 struct fc_bsg_job *bsg_job; 406 struct bsg_job *bsg_job;
407 struct srb_cmd scmd; 407 struct srb_cmd scmd;
408 } u; 408 } u;
409 void (*done)(void *, void *, int); 409 void (*done)(void *, void *, int);
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6ca00813c71f..c51d9f3359e3 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -733,8 +733,8 @@ extern int qla82xx_read_temperature(scsi_qla_host_t *);
733extern int qla8044_read_temperature(scsi_qla_host_t *); 733extern int qla8044_read_temperature(scsi_qla_host_t *);
734 734
735/* BSG related functions */ 735/* BSG related functions */
736extern int qla24xx_bsg_request(struct fc_bsg_job *); 736extern int qla24xx_bsg_request(struct bsg_job *);
737extern int qla24xx_bsg_timeout(struct fc_bsg_job *); 737extern int qla24xx_bsg_timeout(struct bsg_job *);
738extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t); 738extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
739extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, 739extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
740 dma_addr_t, size_t, uint32_t); 740 dma_addr_t, size_t, uint32_t);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index b41265a75ed5..221ad8907893 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2197,7 +2197,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2197static void 2197static void
2198qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2198qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2199{ 2199{
2200 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2200 struct bsg_job *bsg_job = sp->u.bsg_job;
2201 struct fc_bsg_request *bsg_request = bsg_job->request;
2201 2202
2202 els_iocb->entry_type = ELS_IOCB_TYPE; 2203 els_iocb->entry_type = ELS_IOCB_TYPE;
2203 els_iocb->entry_count = 1; 2204 els_iocb->entry_count = 1;
@@ -2212,8 +2213,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2212 2213
2213 els_iocb->opcode = 2214 els_iocb->opcode =
2214 sp->type == SRB_ELS_CMD_RPT ? 2215 sp->type == SRB_ELS_CMD_RPT ?
2215 bsg_job->request->rqst_data.r_els.els_code : 2216 bsg_request->rqst_data.r_els.els_code :
2216 bsg_job->request->rqst_data.h_els.command_code; 2217 bsg_request->rqst_data.h_els.command_code;
2217 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2218 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2218 els_iocb->port_id[1] = sp->fcport->d_id.b.area; 2219 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2219 els_iocb->port_id[2] = sp->fcport->d_id.b.domain; 2220 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
@@ -2250,7 +2251,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2250 uint16_t tot_dsds; 2251 uint16_t tot_dsds;
2251 scsi_qla_host_t *vha = sp->fcport->vha; 2252 scsi_qla_host_t *vha = sp->fcport->vha;
2252 struct qla_hw_data *ha = vha->hw; 2253 struct qla_hw_data *ha = vha->hw;
2253 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2254 struct bsg_job *bsg_job = sp->u.bsg_job;
2254 int loop_iterartion = 0; 2255 int loop_iterartion = 0;
2255 int entry_count = 1; 2256 int entry_count = 1;
2256 2257
@@ -2327,7 +2328,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2327 uint16_t tot_dsds; 2328 uint16_t tot_dsds;
2328 scsi_qla_host_t *vha = sp->fcport->vha; 2329 scsi_qla_host_t *vha = sp->fcport->vha;
2329 struct qla_hw_data *ha = vha->hw; 2330 struct qla_hw_data *ha = vha->hw;
2330 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2331 struct bsg_job *bsg_job = sp->u.bsg_job;
2331 int loop_iterartion = 0; 2332 int loop_iterartion = 0;
2332 int entry_count = 1; 2333 int entry_count = 1;
2333 2334
@@ -2833,7 +2834,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2833 struct scatterlist *sg; 2834 struct scatterlist *sg;
2834 int index; 2835 int index;
2835 int entry_count = 1; 2836 int entry_count = 1;
2836 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2837 struct bsg_job *bsg_job = sp->u.bsg_job;
2837 2838
2838 /*Update entry type to indicate bidir command */ 2839 /*Update entry type to indicate bidir command */
2839 *((uint32_t *)(&cmd_pkt->entry_type)) = 2840 *((uint32_t *)(&cmd_pkt->entry_type)) =
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 068c4e47fac9..19f18485a854 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1356,7 +1356,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1356 const char func[] = "CT_IOCB"; 1356 const char func[] = "CT_IOCB";
1357 const char *type; 1357 const char *type;
1358 srb_t *sp; 1358 srb_t *sp;
1359 struct fc_bsg_job *bsg_job; 1359 struct bsg_job *bsg_job;
1360 struct fc_bsg_reply *bsg_reply;
1360 uint16_t comp_status; 1361 uint16_t comp_status;
1361 int res; 1362 int res;
1362 1363
@@ -1365,6 +1366,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1365 return; 1366 return;
1366 1367
1367 bsg_job = sp->u.bsg_job; 1368 bsg_job = sp->u.bsg_job;
1369 bsg_reply = bsg_job->reply;
1368 1370
1369 type = "ct pass-through"; 1371 type = "ct pass-through";
1370 1372
@@ -1373,32 +1375,32 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1373 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1375 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1374 * fc payload to the caller 1376 * fc payload to the caller
1375 */ 1377 */
1376 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1378 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1377 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1379 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1378 1380
1379 if (comp_status != CS_COMPLETE) { 1381 if (comp_status != CS_COMPLETE) {
1380 if (comp_status == CS_DATA_UNDERRUN) { 1382 if (comp_status == CS_DATA_UNDERRUN) {
1381 res = DID_OK << 16; 1383 res = DID_OK << 16;
1382 bsg_job->reply->reply_payload_rcv_len = 1384 bsg_reply->reply_payload_rcv_len =
1383 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1385 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1384 1386
1385 ql_log(ql_log_warn, vha, 0x5048, 1387 ql_log(ql_log_warn, vha, 0x5048,
1386 "CT pass-through-%s error " 1388 "CT pass-through-%s error "
1387 "comp_status-status=0x%x total_byte = 0x%x.\n", 1389 "comp_status-status=0x%x total_byte = 0x%x.\n",
1388 type, comp_status, 1390 type, comp_status,
1389 bsg_job->reply->reply_payload_rcv_len); 1391 bsg_reply->reply_payload_rcv_len);
1390 } else { 1392 } else {
1391 ql_log(ql_log_warn, vha, 0x5049, 1393 ql_log(ql_log_warn, vha, 0x5049,
1392 "CT pass-through-%s error " 1394 "CT pass-through-%s error "
1393 "comp_status-status=0x%x.\n", type, comp_status); 1395 "comp_status-status=0x%x.\n", type, comp_status);
1394 res = DID_ERROR << 16; 1396 res = DID_ERROR << 16;
1395 bsg_job->reply->reply_payload_rcv_len = 0; 1397 bsg_reply->reply_payload_rcv_len = 0;
1396 } 1398 }
1397 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1399 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1398 (uint8_t *)pkt, sizeof(*pkt)); 1400 (uint8_t *)pkt, sizeof(*pkt));
1399 } else { 1401 } else {
1400 res = DID_OK << 16; 1402 res = DID_OK << 16;
1401 bsg_job->reply->reply_payload_rcv_len = 1403 bsg_reply->reply_payload_rcv_len =
1402 bsg_job->reply_payload.payload_len; 1404 bsg_job->reply_payload.payload_len;
1403 bsg_job->reply_len = 0; 1405 bsg_job->reply_len = 0;
1404 } 1406 }
@@ -1413,7 +1415,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1413 const char func[] = "ELS_CT_IOCB"; 1415 const char func[] = "ELS_CT_IOCB";
1414 const char *type; 1416 const char *type;
1415 srb_t *sp; 1417 srb_t *sp;
1416 struct fc_bsg_job *bsg_job; 1418 struct bsg_job *bsg_job;
1419 struct fc_bsg_reply *bsg_reply;
1417 uint16_t comp_status; 1420 uint16_t comp_status;
1418 uint32_t fw_status[3]; 1421 uint32_t fw_status[3];
1419 uint8_t* fw_sts_ptr; 1422 uint8_t* fw_sts_ptr;
@@ -1423,6 +1426,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1423 if (!sp) 1426 if (!sp)
1424 return; 1427 return;
1425 bsg_job = sp->u.bsg_job; 1428 bsg_job = sp->u.bsg_job;
1429 bsg_reply = bsg_job->reply;
1426 1430
1427 type = NULL; 1431 type = NULL;
1428 switch (sp->type) { 1432 switch (sp->type) {
@@ -1452,13 +1456,13 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1452 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1456 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1453 * fc payload to the caller 1457 * fc payload to the caller
1454 */ 1458 */
1455 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1459 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1456 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 1460 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1457 1461
1458 if (comp_status != CS_COMPLETE) { 1462 if (comp_status != CS_COMPLETE) {
1459 if (comp_status == CS_DATA_UNDERRUN) { 1463 if (comp_status == CS_DATA_UNDERRUN) {
1460 res = DID_OK << 16; 1464 res = DID_OK << 16;
1461 bsg_job->reply->reply_payload_rcv_len = 1465 bsg_reply->reply_payload_rcv_len =
1462 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); 1466 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1463 1467
1464 ql_dbg(ql_dbg_user, vha, 0x503f, 1468 ql_dbg(ql_dbg_user, vha, 0x503f,
@@ -1480,7 +1484,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1480 le16_to_cpu(((struct els_sts_entry_24xx *) 1484 le16_to_cpu(((struct els_sts_entry_24xx *)
1481 pkt)->error_subcode_2)); 1485 pkt)->error_subcode_2));
1482 res = DID_ERROR << 16; 1486 res = DID_ERROR << 16;
1483 bsg_job->reply->reply_payload_rcv_len = 0; 1487 bsg_reply->reply_payload_rcv_len = 0;
1484 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1488 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1485 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1489 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1486 } 1490 }
@@ -1489,7 +1493,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1489 } 1493 }
1490 else { 1494 else {
1491 res = DID_OK << 16; 1495 res = DID_OK << 16;
1492 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1496 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1493 bsg_job->reply_len = 0; 1497 bsg_job->reply_len = 0;
1494 } 1498 }
1495 1499
@@ -1904,7 +1908,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1904 uint16_t scsi_status; 1908 uint16_t scsi_status;
1905 uint16_t thread_id; 1909 uint16_t thread_id;
1906 uint32_t rval = EXT_STATUS_OK; 1910 uint32_t rval = EXT_STATUS_OK;
1907 struct fc_bsg_job *bsg_job = NULL; 1911 struct bsg_job *bsg_job = NULL;
1912 struct fc_bsg_request *bsg_request;
1913 struct fc_bsg_reply *bsg_reply;
1908 sts_entry_t *sts; 1914 sts_entry_t *sts;
1909 struct sts_entry_24xx *sts24; 1915 struct sts_entry_24xx *sts24;
1910 sts = (sts_entry_t *) pkt; 1916 sts = (sts_entry_t *) pkt;
@@ -1919,11 +1925,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1919 } 1925 }
1920 1926
1921 sp = req->outstanding_cmds[index]; 1927 sp = req->outstanding_cmds[index];
1922 if (sp) { 1928 if (!sp) {
1923 /* Free outstanding command slot. */
1924 req->outstanding_cmds[index] = NULL;
1925 bsg_job = sp->u.bsg_job;
1926 } else {
1927 ql_log(ql_log_warn, vha, 0x70b0, 1929 ql_log(ql_log_warn, vha, 0x70b0,
1928 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 1930 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1929 req->id, index); 1931 req->id, index);
@@ -1932,6 +1934,12 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1932 return; 1934 return;
1933 } 1935 }
1934 1936
1937 /* Free outstanding command slot. */
1938 req->outstanding_cmds[index] = NULL;
1939 bsg_job = sp->u.bsg_job;
1940 bsg_request = bsg_job->request;
1941 bsg_reply = bsg_job->reply;
1942
1935 if (IS_FWI2_CAPABLE(ha)) { 1943 if (IS_FWI2_CAPABLE(ha)) {
1936 comp_status = le16_to_cpu(sts24->comp_status); 1944 comp_status = le16_to_cpu(sts24->comp_status);
1937 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1945 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
@@ -1940,14 +1948,14 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1940 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1948 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1941 } 1949 }
1942 1950
1943 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1951 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1944 switch (comp_status) { 1952 switch (comp_status) {
1945 case CS_COMPLETE: 1953 case CS_COMPLETE:
1946 if (scsi_status == 0) { 1954 if (scsi_status == 0) {
1947 bsg_job->reply->reply_payload_rcv_len = 1955 bsg_reply->reply_payload_rcv_len =
1948 bsg_job->reply_payload.payload_len; 1956 bsg_job->reply_payload.payload_len;
1949 vha->qla_stats.input_bytes += 1957 vha->qla_stats.input_bytes +=
1950 bsg_job->reply->reply_payload_rcv_len; 1958 bsg_reply->reply_payload_rcv_len;
1951 vha->qla_stats.input_requests++; 1959 vha->qla_stats.input_requests++;
1952 rval = EXT_STATUS_OK; 1960 rval = EXT_STATUS_OK;
1953 } 1961 }
@@ -2028,11 +2036,11 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2028 rval = EXT_STATUS_ERR; 2036 rval = EXT_STATUS_ERR;
2029 break; 2037 break;
2030 } 2038 }
2031 bsg_job->reply->reply_payload_rcv_len = 0; 2039 bsg_reply->reply_payload_rcv_len = 0;
2032 2040
2033done: 2041done:
2034 /* Return the vendor specific reply to API */ 2042 /* Return the vendor specific reply to API */
2035 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 2043 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2036 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2044 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2037 /* Always return DID_OK, bsg will send the vendor specific response 2045 /* Always return DID_OK, bsg will send the vendor specific response
2038 * in this case only */ 2046 * in this case only */
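
[annotation] The first qla25xx_process_bidir_status_iocb() hunk is a pure guard-clause refactor: rather than nesting the completion work inside if (sp), the invalid-handle case returns early, so the command slot can be freed and the bsg_request/bsg_reply locals initialized on the one remaining path. Reduced to its skeleton (illustrative, error handling elided):

            sp = req->outstanding_cmds[index];
            if (!sp) {
                    /* invalid completion handle: log, schedule ISP abort, bail */
                    return;
            }

            /* sp is known valid from here on; safe to cache the job pointers. */
            req->outstanding_cmds[index] = NULL;
            bsg_job = sp->u.bsg_job;
            bsg_request = bsg_job->request;
            bsg_reply = bsg_job->reply;
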
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 15dff7099955..02f1de18bc2b 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -10,6 +10,7 @@
10#include <linux/pci.h> 10#include <linux/pci.h>
11#include <linux/ratelimit.h> 11#include <linux/ratelimit.h>
12#include <linux/vmalloc.h> 12#include <linux/vmalloc.h>
13#include <linux/bsg-lib.h>
13#include <scsi/scsi_tcq.h> 14#include <scsi/scsi_tcq.h>
14#include <linux/utsname.h> 15#include <linux/utsname.h>
15 16
@@ -2206,7 +2207,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2206{ 2207{
2207 const char func[] = "IOSB_IOCB"; 2208 const char func[] = "IOSB_IOCB";
2208 srb_t *sp; 2209 srb_t *sp;
2209 struct fc_bsg_job *bsg_job; 2210 struct bsg_job *bsg_job;
2211 struct fc_bsg_reply *bsg_reply;
2210 struct srb_iocb *iocb_job; 2212 struct srb_iocb *iocb_job;
2211 int res; 2213 int res;
2212 struct qla_mt_iocb_rsp_fx00 fstatus; 2214 struct qla_mt_iocb_rsp_fx00 fstatus;
@@ -2226,6 +2228,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2226 pkt->dataword_r; 2228 pkt->dataword_r;
2227 } else { 2229 } else {
2228 bsg_job = sp->u.bsg_job; 2230 bsg_job = sp->u.bsg_job;
2231 bsg_reply = bsg_job->reply;
2229 2232
2230 memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00)); 2233 memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
2231 2234
@@ -2257,8 +2260,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2257 sp->fcport->vha, 0x5074, 2260 sp->fcport->vha, 0x5074,
2258 (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00)); 2261 (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
2259 2262
2260 res = bsg_job->reply->result = DID_OK << 16; 2263 res = bsg_reply->result = DID_OK << 16;
2261 bsg_job->reply->reply_payload_rcv_len = 2264 bsg_reply->reply_payload_rcv_len =
2262 bsg_job->reply_payload.payload_len; 2265 bsg_job->reply_payload.payload_len;
2263 } 2266 }
2264 sp->done(vha, sp, res); 2267 sp->done(vha, sp, res);
@@ -3252,7 +3255,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3252{ 3255{
3253 struct srb_iocb *fxio = &sp->u.iocb_cmd; 3256 struct srb_iocb *fxio = &sp->u.iocb_cmd;
3254 struct qla_mt_iocb_rqst_fx00 *piocb_rqst; 3257 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
3255 struct fc_bsg_job *bsg_job; 3258 struct bsg_job *bsg_job;
3259 struct fc_bsg_request *bsg_request;
3256 struct fxdisc_entry_fx00 fx_iocb; 3260 struct fxdisc_entry_fx00 fx_iocb;
3257 uint8_t entry_cnt = 1; 3261 uint8_t entry_cnt = 1;
3258 3262
@@ -3301,8 +3305,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3301 } else { 3305 } else {
3302 struct scatterlist *sg; 3306 struct scatterlist *sg;
3303 bsg_job = sp->u.bsg_job; 3307 bsg_job = sp->u.bsg_job;
3308 bsg_request = bsg_job->request;
3304 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 3309 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
3305 &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 3310 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
3306 3311
3307 fx_iocb.func_num = piocb_rqst->func_type; 3312 fx_iocb.func_num = piocb_rqst->func_type;
3308 fx_iocb.adapid = piocb_rqst->adapid; 3313 fx_iocb.adapid = piocb_rqst->adapid;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7cfc270bd08..aeebefb1e9f8 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -409,18 +409,9 @@ struct qla4_8xxx_legacy_intr_set {
409 409
410/* MSI-X Support */ 410/* MSI-X Support */
411 411
412#define QLA_MSIX_DEFAULT 0x00 412#define QLA_MSIX_DEFAULT 0
413#define QLA_MSIX_RSP_Q 0x01 413#define QLA_MSIX_RSP_Q 1
414
415#define QLA_MSIX_ENTRIES 2 414#define QLA_MSIX_ENTRIES 2
416#define QLA_MIDX_DEFAULT 0
417#define QLA_MIDX_RSP_Q 1
418
419struct ql4_msix_entry {
420 int have_irq;
421 uint16_t msix_vector;
422 uint16_t msix_entry;
423};
424 415
425/* 416/*
426 * ISP Operations 417 * ISP Operations
@@ -572,9 +563,6 @@ struct scsi_qla_host {
572#define AF_IRQ_ATTACHED 10 /* 0x00000400 */ 563#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
573#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ 564#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
574#define AF_HA_REMOVAL 12 /* 0x00001000 */ 565#define AF_HA_REMOVAL 12 /* 0x00001000 */
575#define AF_INTx_ENABLED 15 /* 0x00008000 */
576#define AF_MSI_ENABLED 16 /* 0x00010000 */
577#define AF_MSIX_ENABLED 17 /* 0x00020000 */
578#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */ 566#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */
579#define AF_FW_RECOVERY 19 /* 0x00080000 */ 567#define AF_FW_RECOVERY 19 /* 0x00080000 */
580#define AF_EEH_BUSY 20 /* 0x00100000 */ 568#define AF_EEH_BUSY 20 /* 0x00100000 */
@@ -762,8 +750,6 @@ struct scsi_qla_host {
762 struct isp_operations *isp_ops; 750 struct isp_operations *isp_ops;
763 struct ql82xx_hw_data hw; 751 struct ql82xx_hw_data hw;
764 752
765 struct ql4_msix_entry msix_entries[QLA_MSIX_ENTRIES];
766
767 uint32_t nx_dev_init_timeout; 753 uint32_t nx_dev_init_timeout;
768 uint32_t nx_reset_timeout; 754 uint32_t nx_reset_timeout;
769 void *fw_dump; 755 void *fw_dump;
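
[annotation] The ql4_def.h hunks delete the driver's private IRQ bookkeeping wholesale: the ql4_msix_entry array and the AF_INTx/AF_MSI/AF_MSIX_ENABLED flag bits become redundant once pci_alloc_irq_vectors() is used, because the PCI core already records the active mode in pdev->msi_enabled/msix_enabled and hands back vector numbers via pci_irq_vector(). The mode test that replaces the flag bits then looks like this (sketch only; irq is an assumed local):

            /* After pci_alloc_irq_vectors(), the PCI core tracks the mode. */
            if (ha->pdev->msix_enabled)
                    irq = pci_irq_vector(ha->pdev, QLA_MSIX_RSP_Q);
            else if (ha->pdev->msi_enabled)
                    irq = pci_irq_vector(ha->pdev, 0);  /* same as pdev->irq */
            else
                    irq = ha->pdev->irq;                /* legacy INTx */
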
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 2559144f5475..bce96a58f14e 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -134,7 +134,6 @@ int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
134void qla4_82xx_enable_intrs(struct scsi_qla_host *ha); 134void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
135void qla4_82xx_disable_intrs(struct scsi_qla_host *ha); 135void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
136int qla4_8xxx_enable_msix(struct scsi_qla_host *ha); 136int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
137void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
138irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id); 137irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
139irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id); 138irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
140irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id); 139irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 4f9c0f2be89d..d2cd33d8d67f 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1107,7 +1107,7 @@ static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
1107 DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n")); 1107 DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
1108 if (is_qla8022(ha)) { 1108 if (is_qla8022(ha)) {
1109 writel(0, &ha->qla4_82xx_reg->host_int); 1109 writel(0, &ha->qla4_82xx_reg->host_int);
1110 if (test_bit(AF_INTx_ENABLED, &ha->flags)) 1110 if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)
1111 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 1111 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
1112 0xfbff); 1112 0xfbff);
1113 } 1113 }
@@ -1564,19 +1564,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1564 1564
1565try_msi: 1565try_msi:
1566 /* Trying MSI */ 1566 /* Trying MSI */
1567 ret = pci_enable_msi(ha->pdev); 1567 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
1568 if (!ret) { 1568 if (ret > 0) {
1569 ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler, 1569 ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
1570 0, DRIVER_NAME, ha); 1570 0, DRIVER_NAME, ha);
1571 if (!ret) { 1571 if (!ret) {
1572 DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n")); 1572 DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1573 set_bit(AF_MSI_ENABLED, &ha->flags);
1574 goto irq_attached; 1573 goto irq_attached;
1575 } else { 1574 } else {
1576 ql4_printk(KERN_WARNING, ha, 1575 ql4_printk(KERN_WARNING, ha,
1577 "MSI: Failed to reserve interrupt %d " 1576 "MSI: Failed to reserve interrupt %d "
1578 "already in use.\n", ha->pdev->irq); 1577 "already in use.\n", ha->pdev->irq);
1579 pci_disable_msi(ha->pdev); 1578 pci_free_irq_vectors(ha->pdev);
1580 } 1579 }
1581 } 1580 }
1582 1581
@@ -1592,7 +1591,6 @@ try_intx:
1592 IRQF_SHARED, DRIVER_NAME, ha); 1591 IRQF_SHARED, DRIVER_NAME, ha);
1593 if (!ret) { 1592 if (!ret) {
1594 DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n")); 1593 DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
1595 set_bit(AF_INTx_ENABLED, &ha->flags);
1596 goto irq_attached; 1594 goto irq_attached;
1597 1595
1598 } else { 1596 } else {
@@ -1614,14 +1612,11 @@ irq_not_attached:
1614 1612
1615void qla4xxx_free_irqs(struct scsi_qla_host *ha) 1613void qla4xxx_free_irqs(struct scsi_qla_host *ha)
1616{ 1614{
1617 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) { 1615 if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
1618 if (test_bit(AF_MSIX_ENABLED, &ha->flags)) { 1616 return;
1619 qla4_8xxx_disable_msix(ha); 1617
1620 } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) { 1618 if (ha->pdev->msix_enabled)
1621 free_irq(ha->pdev->irq, ha); 1619 free_irq(pci_irq_vector(ha->pdev, 1), ha);
1622 pci_disable_msi(ha->pdev); 1620 free_irq(pci_irq_vector(ha->pdev, 0), ha);
1623 } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) { 1621 pci_free_irq_vectors(ha->pdev);
1624 free_irq(ha->pdev->irq, ha);
1625 }
1626 }
1627} 1622}
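
[annotation] The ql4_isr.c hunks show the canonical pci_alloc_irq_vectors() usage for a single MSI vector with legacy-INTx fallback, and a teardown that derives vector numbers from pci_irq_vector() instead of cached copies. Note the return convention that explains the `ret > 0` test: pci_alloc_irq_vectors() returns the number of vectors allocated on success and a negative errno on failure. A condensed sketch of the same allocation shape (my_setup_irq, my_handler and "my_drv" are placeholders, not driver names):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    static int my_setup_irq(struct pci_dev *pdev, irq_handler_t my_handler,
                            void *data)
    {
            int ret;

            /* Ask for exactly one MSI vector; returns the count on success. */
            ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
            if (ret > 0) {
                    ret = request_irq(pci_irq_vector(pdev, 0), my_handler, 0,
                                      "my_drv", data);
                    if (!ret)
                            return 0;
                    pci_free_irq_vectors(pdev);
            }

            /* Fall back to shared legacy INTx. */
            return request_irq(pdev->irq, my_handler, IRQF_SHARED, "my_drv",
                               data);
    }
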
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c291fdff1b33..1da04f323d38 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -2032,10 +2032,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
2032 ptid = (uint16_t *)&fw_ddb_entry->isid[1]; 2032 ptid = (uint16_t *)&fw_ddb_entry->isid[1];
2033 *ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id); 2033 *ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
2034 2034
2035 DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n", 2035 DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid));
2036 fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
2037 fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
2038 fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
2039 2036
2040 iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options); 2037 iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
2041 memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias)); 2038 memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
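
[annotation] This hunk and the matching ones in ql4_nx.c and ql4_os.c below replace open-coded byte-by-byte hex dumps with the kernel's printk pointer extensions: %pM prints a 6-byte address colon-separated, %pm prints it with no separators, and the R-suffixed variants print in reversed byte order, which matches the old isid[5]..isid[0] ordering. A tiny illustration (the array contents are made up):

    #include <linux/printk.h>

            u8 isid[6] = { 0x02, 0x00, 0x5e, 0x01, 0x02, 0x03 };

            pr_info("ISID [%pmR]\n", isid); /* isid[5]..isid[0], no ':' */
            pr_info("ISID [%pm]\n",  isid); /* isid[0]..isid[5], no ':' */
            pr_info("mac %pM\n",     isid); /* colon-separated, as in ql4_nx.c */
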
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 06ddd13cb7cc..e91abb327745 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3945,7 +3945,7 @@ void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
3945 ha->isp_ops->interrupt_service_routine(ha, intr_status); 3945 ha->isp_ops->interrupt_service_routine(ha, intr_status);
3946 3946
3947 if (test_bit(AF_INTERRUPTS_ON, &ha->flags) && 3947 if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
3948 test_bit(AF_INTx_ENABLED, &ha->flags)) 3948 (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled))
3949 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 3949 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
3950 0xfbff); 3950 0xfbff);
3951 } 3951 }
@@ -4094,12 +4094,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
4094 ha->phy_port_num = sys_info->port_num; 4094 ha->phy_port_num = sys_info->port_num;
4095 ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt; 4095 ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
4096 4096
4097 DEBUG2(printk("scsi%ld: %s: " 4097 DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n",
4098 "mac %02x:%02x:%02x:%02x:%02x:%02x " 4098 ha->host_no, __func__, ha->my_mac, ha->serial_number));
4099 "serial %s\n", ha->host_no, __func__,
4100 ha->my_mac[0], ha->my_mac[1], ha->my_mac[2],
4101 ha->my_mac[3], ha->my_mac[4], ha->my_mac[5],
4102 ha->serial_number));
4103 4099
4104 status = QLA_SUCCESS; 4100 status = QLA_SUCCESS;
4105 4101
@@ -4178,78 +4174,37 @@ qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
4178 spin_unlock_irq(&ha->hardware_lock); 4174 spin_unlock_irq(&ha->hardware_lock);
4179} 4175}
4180 4176
4181struct ql4_init_msix_entry {
4182 uint16_t entry;
4183 uint16_t index;
4184 const char *name;
4185 irq_handler_t handler;
4186};
4187
4188static struct ql4_init_msix_entry qla4_8xxx_msix_entries[QLA_MSIX_ENTRIES] = {
4189 { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
4190 "qla4xxx (default)",
4191 (irq_handler_t)qla4_8xxx_default_intr_handler },
4192 { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
4193 "qla4xxx (rsp_q)", (irq_handler_t)qla4_8xxx_msix_rsp_q },
4194};
4195
4196void
4197qla4_8xxx_disable_msix(struct scsi_qla_host *ha)
4198{
4199 int i;
4200 struct ql4_msix_entry *qentry;
4201
4202 for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
4203 qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
4204 if (qentry->have_irq) {
4205 free_irq(qentry->msix_vector, ha);
4206 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
4207 __func__, qla4_8xxx_msix_entries[i].name));
4208 }
4209 }
4210 pci_disable_msix(ha->pdev);
4211 clear_bit(AF_MSIX_ENABLED, &ha->flags);
4212}
4213
4214int 4177int
4215qla4_8xxx_enable_msix(struct scsi_qla_host *ha) 4178qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
4216{ 4179{
4217 int i, ret; 4180 int ret;
4218 struct msix_entry entries[QLA_MSIX_ENTRIES];
4219 struct ql4_msix_entry *qentry;
4220
4221 for (i = 0; i < QLA_MSIX_ENTRIES; i++)
4222 entries[i].entry = qla4_8xxx_msix_entries[i].entry;
4223 4181
4224 ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries)); 4182 ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES,
4225 if (ret) { 4183 QLA_MSIX_ENTRIES, PCI_IRQ_MSIX);
4184 if (ret < 0) {
4226 ql4_printk(KERN_WARNING, ha, 4185 ql4_printk(KERN_WARNING, ha,
4227 "MSI-X: Failed to enable support -- %d/%d\n", 4186 "MSI-X: Failed to enable support -- %d/%d\n",
4228 QLA_MSIX_ENTRIES, ret); 4187 QLA_MSIX_ENTRIES, ret);
4229 goto msix_out; 4188 return ret;
4230 }
4231 set_bit(AF_MSIX_ENABLED, &ha->flags);
4232
4233 for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
4234 qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
4235 qentry->msix_vector = entries[i].vector;
4236 qentry->msix_entry = entries[i].entry;
4237 qentry->have_irq = 0;
4238 ret = request_irq(qentry->msix_vector,
4239 qla4_8xxx_msix_entries[i].handler, 0,
4240 qla4_8xxx_msix_entries[i].name, ha);
4241 if (ret) {
4242 ql4_printk(KERN_WARNING, ha,
4243 "MSI-X: Unable to register handler -- %x/%d.\n",
4244 qla4_8xxx_msix_entries[i].index, ret);
4245 qla4_8xxx_disable_msix(ha);
4246 goto msix_out;
4247 }
4248 qentry->have_irq = 1;
4249 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
4250 __func__, qla4_8xxx_msix_entries[i].name));
4251 } 4189 }
4252msix_out: 4190
4191 ret = request_irq(pci_irq_vector(ha->pdev, 0),
4192 qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)",
4193 ha);
4194 if (ret)
4195 goto out_free_vectors;
4196
4197 ret = request_irq(pci_irq_vector(ha->pdev, 1),
4198 qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha);
4199 if (ret)
4200 goto out_free_default_irq;
4201
4202 return 0;
4203
4204out_free_default_irq:
4205 free_irq(pci_irq_vector(ha->pdev, 0), ha);
4206out_free_vectors:
4207 pci_free_irq_vectors(ha->pdev);
4253 return ret; 4208 return ret;
4254} 4209}
4255 4210
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 01c3610a60cf..9fbb33fc90c7 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -6304,13 +6304,9 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
6304 * ISID would not match firmware generated ISID. 6304 * ISID would not match firmware generated ISID.
6305 */ 6305 */
6306 if (is_isid_compare) { 6306 if (is_isid_compare) {
6307 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x" 6307 DEBUG2(ql4_printk(KERN_INFO, ha,
6308 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n", 6308 "%s: old ISID [%pmR] New ISID [%pmR]\n",
6309 __func__, old_tddb->isid[5], old_tddb->isid[4], 6309 __func__, old_tddb->isid, new_tddb->isid));
6310 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
6311 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
6312 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
6313 new_tddb->isid[0]));
6314 6310
6315 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6311 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
6316 sizeof(old_tddb->isid))) 6312 sizeof(old_tddb->isid)))
@@ -7925,10 +7921,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
7925 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); 7921 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
7926 break; 7922 break;
7927 case ISCSI_FLASHNODE_ISID: 7923 case ISCSI_FLASHNODE_ISID:
7928 rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", 7924 rc = sprintf(buf, "%pm\n", fnode_sess->isid);
7929 fnode_sess->isid[0], fnode_sess->isid[1],
7930 fnode_sess->isid[2], fnode_sess->isid[3],
7931 fnode_sess->isid[4], fnode_sess->isid[5]);
7932 break; 7925 break;
7933 case ISCSI_FLASHNODE_TSID: 7926 case ISCSI_FLASHNODE_TSID:
7934 rc = sprintf(buf, "%u\n", fnode_sess->tsid); 7927 rc = sprintf(buf, "%u\n", fnode_sess->tsid);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 246456925335..28fea83ae2fe 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -220,8 +220,6 @@ static struct {
220 {"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 220 {"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
221 {"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 221 {"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
222 {"NEC", "iStorage", NULL, BLIST_REPORTLUN2}, 222 {"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
223 {"NETAPP", "LUN C-Mode", NULL, BLIST_SYNC_ALUA},
224 {"NETAPP", "INF-01-00", NULL, BLIST_SYNC_ALUA},
225 {"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 223 {"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
226 {"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 224 {"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
227 {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 225 {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9a8ccff1121f..c35b6de4ca64 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1998,6 +1998,15 @@ static void scsi_exit_request(void *data, struct request *rq,
1998 kfree(cmd->sense_buffer); 1998 kfree(cmd->sense_buffer);
1999} 1999}
2000 2000
2001static int scsi_map_queues(struct blk_mq_tag_set *set)
2002{
2003 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
2004
2005 if (shost->hostt->map_queues)
2006 return shost->hostt->map_queues(shost);
2007 return blk_mq_map_queues(set);
2008}
2009
2001static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) 2010static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
2002{ 2011{
2003 struct device *host_dev; 2012 struct device *host_dev;
@@ -2090,6 +2099,7 @@ static struct blk_mq_ops scsi_mq_ops = {
2090 .timeout = scsi_timeout, 2099 .timeout = scsi_timeout,
2091 .init_request = scsi_init_request, 2100 .init_request = scsi_init_request,
2092 .exit_request = scsi_exit_request, 2101 .exit_request = scsi_exit_request,
2102 .map_queues = scsi_map_queues,
2093}; 2103};
2094 2104
2095struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) 2105struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@ -2732,6 +2742,39 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
2732EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2742EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
2733 2743
2734/** 2744/**
2745 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
2746 * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
2747 */
2748static int scsi_request_fn_active(struct scsi_device *sdev)
2749{
2750 struct request_queue *q = sdev->request_queue;
2751 int request_fn_active;
2752
2753 WARN_ON_ONCE(sdev->host->use_blk_mq);
2754
2755 spin_lock_irq(q->queue_lock);
2756 request_fn_active = q->request_fn_active;
2757 spin_unlock_irq(q->queue_lock);
2758
2759 return request_fn_active;
2760}
2761
2762/**
2763 * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
2764 * @sdev: SCSI device pointer.
2765 *
2766 * Wait until the ongoing shost->hostt->queuecommand() calls that are
2767 * invoked from scsi_request_fn() have finished.
2768 */
2769static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
2770{
2771 WARN_ON_ONCE(sdev->host->use_blk_mq);
2772
2773 while (scsi_request_fn_active(sdev))
2774 msleep(20);
2775}
2776
2777/**
2735 * scsi_device_quiesce - Block user issued commands. 2778 * scsi_device_quiesce - Block user issued commands.
2736 * @sdev: scsi device to quiesce. 2779 * @sdev: scsi device to quiesce.
2737 * 2780 *
@@ -2815,8 +2858,7 @@ EXPORT_SYMBOL(scsi_target_resume);
2815 * @sdev: device to block 2858 * @sdev: device to block
2816 * 2859 *
2817 * Block request made by scsi lld's to temporarily stop all 2860 * Block request made by scsi lld's to temporarily stop all
2818 * scsi commands on the specified device. Called from interrupt 2861 * scsi commands on the specified device. May sleep.
2819 * or normal process context.
2820 * 2862 *
2821 * Returns zero if successful or error if not 2863 * Returns zero if successful or error if not
2822 * 2864 *
@@ -2825,6 +2867,10 @@ EXPORT_SYMBOL(scsi_target_resume);
2825 * (which must be a legal transition). When the device is in this 2867 * (which must be a legal transition). When the device is in this
2826 * state, all commands are deferred until the scsi lld reenables 2868 * state, all commands are deferred until the scsi lld reenables
2827 * the device with scsi_device_unblock or device_block_tmo fires. 2869 * the device with scsi_device_unblock or device_block_tmo fires.
2870 *
2871 * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
2872 * scsi_internal_device_block() has blocked a SCSI device and also
2873 * remove the rport mutex lock and unlock calls from srp_queuecommand().
2828 */ 2874 */
2829int 2875int
2830scsi_internal_device_block(struct scsi_device *sdev) 2876scsi_internal_device_block(struct scsi_device *sdev)
@@ -2852,6 +2898,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
2852 spin_lock_irqsave(q->queue_lock, flags); 2898 spin_lock_irqsave(q->queue_lock, flags);
2853 blk_stop_queue(q); 2899 blk_stop_queue(q);
2854 spin_unlock_irqrestore(q->queue_lock, flags); 2900 spin_unlock_irqrestore(q->queue_lock, flags);
2901 scsi_wait_for_queuecommand(sdev);
2855 } 2902 }
2856 2903
2857 return 0; 2904 return 0;
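
[annotation] Two additions stand out in scsi_lib.c. First, the new .map_queues blk-mq operation lets a SCSI LLD override how blk-mq hardware contexts map to CPUs (typically to follow the affinity produced by pci_alloc_irq_vectors), falling back to the generic blk_mq_map_queues() spread otherwise. A hypothetical LLD callback could simply delegate to the PCI helper (struct my_hba and my_map_queues are invented for illustration; blk_mq_pci_map_queues() is assumed available from blk-mq-pci):

    #include <linux/pci.h>
    #include <linux/blk-mq-pci.h>
    #include <scsi/scsi_host.h>

    struct my_hba {
            struct pci_dev *pdev;
    };

    /* Hypothetical LLD callback: map hw queues onto the PCI IRQ affinity. */
    static int my_map_queues(struct Scsi_Host *shost)
    {
            struct my_hba *hba = shost_priv(shost);

            return blk_mq_pci_map_queues(&shost->tag_set, hba->pdev);
    }

Second, scsi_wait_for_queuecommand() closes a race on the legacy (non-blk-mq) path: after blk_stop_queue(), scsi_internal_device_block() now polls q->request_fn_active until no ->queuecommand() call invoked from scsi_request_fn() is still running, which is why the docbook comment changes from "Called from interrupt or normal process context" to "May sleep".
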
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 0f3a3869524b..03577bde6ac5 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -30,6 +30,7 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/bsg-lib.h>
33#include <scsi/scsi_device.h> 34#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h> 35#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport.h> 36#include <scsi/scsi_transport.h>
@@ -2592,7 +2593,7 @@ fc_rport_final_delete(struct work_struct *work)
2592 2593
2593 2594
2594/** 2595/**
2595 * fc_rport_create - allocates and creates a remote FC port. 2596 * fc_remote_port_create - allocates and creates a remote FC port.
2596 * @shost: scsi host the remote port is connected to. 2597 * @shost: scsi host the remote port is connected to.
2597 * @channel: Channel on shost port connected to. 2598 * @channel: Channel on shost port connected to.
2598 * @ids: The world wide names, fc address, and FC4 port 2599 * @ids: The world wide names, fc address, and FC4 port
@@ -2605,8 +2606,8 @@ fc_rport_final_delete(struct work_struct *work)
2605 * This routine assumes no locks are held on entry. 2606 * This routine assumes no locks are held on entry.
2606 */ 2607 */
2607static struct fc_rport * 2608static struct fc_rport *
2608fc_rport_create(struct Scsi_Host *shost, int channel, 2609fc_remote_port_create(struct Scsi_Host *shost, int channel,
2609 struct fc_rport_identifiers *ids) 2610 struct fc_rport_identifiers *ids)
2610{ 2611{
2611 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2612 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2612 struct fc_internal *fci = to_fc_internal(shost->transportt); 2613 struct fc_internal *fci = to_fc_internal(shost->transportt);
@@ -2914,7 +2915,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2914 spin_unlock_irqrestore(shost->host_lock, flags); 2915 spin_unlock_irqrestore(shost->host_lock, flags);
2915 2916
2916 /* No consistent binding found - create new remote port entry */ 2917 /* No consistent binding found - create new remote port entry */
2917 rport = fc_rport_create(shost, channel, ids); 2918 rport = fc_remote_port_create(shost, channel, ids);
2918 2919
2919 return rport; 2920 return rport;
2920} 2921}
@@ -3554,81 +3555,6 @@ fc_vport_sched_delete(struct work_struct *work)
3554 * BSG support 3555 * BSG support
3555 */ 3556 */
3556 3557
3557
3558/**
3559 * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
3560 * @job: fc_bsg_job that is to be torn down
3561 */
3562static void
3563fc_destroy_bsgjob(struct fc_bsg_job *job)
3564{
3565 unsigned long flags;
3566
3567 spin_lock_irqsave(&job->job_lock, flags);
3568 if (job->ref_cnt) {
3569 spin_unlock_irqrestore(&job->job_lock, flags);
3570 return;
3571 }
3572 spin_unlock_irqrestore(&job->job_lock, flags);
3573
3574 put_device(job->dev); /* release reference for the request */
3575
3576 kfree(job->request_payload.sg_list);
3577 kfree(job->reply_payload.sg_list);
3578 kfree(job);
3579}
3580
3581/**
3582 * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
3583 * completed
3584 * @job: fc_bsg_job that is complete
3585 */
3586static void
3587fc_bsg_jobdone(struct fc_bsg_job *job)
3588{
3589 struct request *req = job->req;
3590 struct request *rsp = req->next_rq;
3591 int err;
3592
3593 err = job->req->errors = job->reply->result;
3594
3595 if (err < 0)
3596 /* we're only returning the result field in the reply */
3597 job->req->sense_len = sizeof(uint32_t);
3598 else
3599 job->req->sense_len = job->reply_len;
3600
3601 /* we assume all request payload was transferred, residual == 0 */
3602 req->resid_len = 0;
3603
3604 if (rsp) {
3605 WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
3606
3607 /* set reply (bidi) residual */
3608 rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
3609 rsp->resid_len);
3610 }
3611 blk_complete_request(req);
3612}
3613
3614/**
3615 * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
3616 * @rq: BSG request that holds the job to be destroyed
3617 */
3618static void fc_bsg_softirq_done(struct request *rq)
3619{
3620 struct fc_bsg_job *job = rq->special;
3621 unsigned long flags;
3622
3623 spin_lock_irqsave(&job->job_lock, flags);
3624 job->state_flags |= FC_RQST_STATE_DONE;
3625 job->ref_cnt--;
3626 spin_unlock_irqrestore(&job->job_lock, flags);
3627
3628 blk_end_request_all(rq, rq->errors);
3629 fc_destroy_bsgjob(job);
3630}
3631
3632/** 3558/**
3633 * fc_bsg_job_timeout - handler for when a bsg request timesout 3559 * fc_bsg_job_timeout - handler for when a bsg request timesout
3634 * @req: request that timed out 3560 * @req: request that timed out
@@ -3636,27 +3562,22 @@ static void fc_bsg_softirq_done(struct request *rq)
3636static enum blk_eh_timer_return 3562static enum blk_eh_timer_return
3637fc_bsg_job_timeout(struct request *req) 3563fc_bsg_job_timeout(struct request *req)
3638{ 3564{
3639 struct fc_bsg_job *job = (void *) req->special; 3565 struct bsg_job *job = (void *) req->special;
3640 struct Scsi_Host *shost = job->shost; 3566 struct Scsi_Host *shost = fc_bsg_to_shost(job);
3567 struct fc_rport *rport = fc_bsg_to_rport(job);
3641 struct fc_internal *i = to_fc_internal(shost->transportt); 3568 struct fc_internal *i = to_fc_internal(shost->transportt);
3642 unsigned long flags; 3569 int err = 0, inflight = 0;
3643 int err = 0, done = 0;
3644 3570
3645 if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED) 3571 if (rport && rport->port_state == FC_PORTSTATE_BLOCKED)
3646 return BLK_EH_RESET_TIMER; 3572 return BLK_EH_RESET_TIMER;
3647 3573
3648 spin_lock_irqsave(&job->job_lock, flags); 3574 inflight = bsg_job_get(job);
3649 if (job->state_flags & FC_RQST_STATE_DONE)
3650 done = 1;
3651 else
3652 job->ref_cnt++;
3653 spin_unlock_irqrestore(&job->job_lock, flags);
3654 3575
3655 if (!done && i->f->bsg_timeout) { 3576 if (inflight && i->f->bsg_timeout) {
3656 /* call LLDD to abort the i/o as it has timed out */ 3577 /* call LLDD to abort the i/o as it has timed out */
3657 err = i->f->bsg_timeout(job); 3578 err = i->f->bsg_timeout(job);
3658 if (err == -EAGAIN) { 3579 if (err == -EAGAIN) {
3659 job->ref_cnt--; 3580 bsg_job_put(job);
3660 return BLK_EH_RESET_TIMER; 3581 return BLK_EH_RESET_TIMER;
3661 } else if (err) 3582 } else if (err)
3662 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " 3583 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
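
[annotation] The hand-rolled job_lock/ref_cnt/state_flags dance in the old fc_bsg_job_timeout() collapses into bsg-lib's reference counting: bsg_job_get() takes a reference only if the job has not already completed (kref_get_unless_zero semantics), so a single call both detects the race with completion and pins the job across the LLD's bsg_timeout() callback, while -EAGAIN drops the extra reference and re-arms the timer. Condensing this hunk and its continuation below into one skeleton:

            inflight = bsg_job_get(job);    /* 0 if the job already completed */

            if (inflight && i->f->bsg_timeout) {
                    err = i->f->bsg_timeout(job);
                    if (err == -EAGAIN) {
                            bsg_job_put(job);       /* drop the extra ref */
                            return BLK_EH_RESET_TIMER;
                    }
            }

            return inflight ? BLK_EH_HANDLED : BLK_EH_NOT_HANDLED;
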
@@ -3664,126 +3585,33 @@ fc_bsg_job_timeout(struct request *req)
3664 } 3585 }
3665 3586
3666 /* the blk_end_sync_io() doesn't check the error */ 3587 /* the blk_end_sync_io() doesn't check the error */
3667 if (done) 3588 if (!inflight)
3668 return BLK_EH_NOT_HANDLED; 3589 return BLK_EH_NOT_HANDLED;
3669 else 3590 else
3670 return BLK_EH_HANDLED; 3591 return BLK_EH_HANDLED;
3671} 3592}
3672 3593
3673static int
3674fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
3675{
3676 size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
3677
3678 BUG_ON(!req->nr_phys_segments);
3679
3680 buf->sg_list = kzalloc(sz, GFP_KERNEL);
3681 if (!buf->sg_list)
3682 return -ENOMEM;
3683 sg_init_table(buf->sg_list, req->nr_phys_segments);
3684 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
3685 buf->payload_len = blk_rq_bytes(req);
3686 return 0;
3687}
3688
3689
3690/**
3691 * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
3692 * bsg request
3693 * @shost: SCSI Host corresponding to the bsg object
3694 * @rport: (optional) FC Remote Port corresponding to the bsg object
3695 * @req: BSG request that needs a job structure
3696 */
3697static int
3698fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
3699 struct request *req)
3700{
3701 struct fc_internal *i = to_fc_internal(shost->transportt);
3702 struct request *rsp = req->next_rq;
3703 struct fc_bsg_job *job;
3704 int ret;
3705
3706 BUG_ON(req->special);
3707
3708 job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
3709 GFP_KERNEL);
3710 if (!job)
3711 return -ENOMEM;
3712
3713 /*
3714 * Note: this is a bit silly.
3715 * The request gets formatted as a SGIO v4 ioctl request, which
3716 * then gets reformatted as a blk request, which then gets
3717 * reformatted as a fc bsg request. And on completion, we have
3718 * to wrap return results such that SGIO v4 thinks it was a scsi
3719 * status. I hope this was all worth it.
3720 */
3721
3722 req->special = job;
3723 job->shost = shost;
3724 job->rport = rport;
3725 job->req = req;
3726 if (i->f->dd_bsg_size)
3727 job->dd_data = (void *)&job[1];
3728 spin_lock_init(&job->job_lock);
3729 job->request = (struct fc_bsg_request *)req->cmd;
3730 job->request_len = req->cmd_len;
3731 job->reply = req->sense;
3732 job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
3733 * allocated */
3734 if (req->bio) {
3735 ret = fc_bsg_map_buffer(&job->request_payload, req);
3736 if (ret)
3737 goto failjob_rls_job;
3738 }
3739 if (rsp && rsp->bio) {
3740 ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
3741 if (ret)
3742 goto failjob_rls_rqst_payload;
3743 }
3744 job->job_done = fc_bsg_jobdone;
3745 if (rport)
3746 job->dev = &rport->dev;
3747 else
3748 job->dev = &shost->shost_gendev;
3749 get_device(job->dev); /* take a reference for the request */
3750
3751 job->ref_cnt = 1;
3752
3753 return 0;
3754
3755
3756failjob_rls_rqst_payload:
3757 kfree(job->request_payload.sg_list);
3758failjob_rls_job:
3759 kfree(job);
3760 return -ENOMEM;
3761}
3762
3763
3764enum fc_dispatch_result {
3765 FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
3766 FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
3767 FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
3768};
3769
3770
3771/** 3594/**
3772 * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD 3595 * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
3773 * @q: fc host request queue
3774 * @shost: scsi host rport attached to 3596 * @shost: scsi host rport attached to
3775 * @job: bsg job to be processed 3597 * @job: bsg job to be processed
3776 */ 3598 */
3777static enum fc_dispatch_result 3599static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
3778fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3779 struct fc_bsg_job *job)
3780{ 3600{
3781 struct fc_internal *i = to_fc_internal(shost->transportt); 3601 struct fc_internal *i = to_fc_internal(shost->transportt);
3602 struct fc_bsg_request *bsg_request = job->request;
3603 struct fc_bsg_reply *bsg_reply = job->reply;
3782 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ 3604 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
3783 int ret; 3605 int ret;
3784 3606
3607 /* check if we really have all the request data needed */
3608 if (job->request_len < cmdlen) {
3609 ret = -ENOMSG;
3610 goto fail_host_msg;
3611 }
3612
3785 /* Validate the host command */ 3613 /* Validate the host command */
3786 switch (job->request->msgcode) { 3614 switch (bsg_request->msgcode) {
3787 case FC_BSG_HST_ADD_RPORT: 3615 case FC_BSG_HST_ADD_RPORT:
3788 cmdlen += sizeof(struct fc_bsg_host_add_rport); 3616 cmdlen += sizeof(struct fc_bsg_host_add_rport);
3789 break; 3617 break;
@@ -3815,7 +3643,7 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3815 case FC_BSG_HST_VENDOR: 3643 case FC_BSG_HST_VENDOR:
3816 cmdlen += sizeof(struct fc_bsg_host_vendor); 3644 cmdlen += sizeof(struct fc_bsg_host_vendor);
3817 if ((shost->hostt->vendor_id == 0L) || 3645 if ((shost->hostt->vendor_id == 0L) ||
3818 (job->request->rqst_data.h_vendor.vendor_id != 3646 (bsg_request->rqst_data.h_vendor.vendor_id !=
3819 shost->hostt->vendor_id)) { 3647 shost->hostt->vendor_id)) {
3820 ret = -ESRCH; 3648 ret = -ESRCH;
3821 goto fail_host_msg; 3649 goto fail_host_msg;
@@ -3827,24 +3655,19 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3827 goto fail_host_msg; 3655 goto fail_host_msg;
3828 } 3656 }
3829 3657
3830 /* check if we really have all the request data needed */
3831 if (job->request_len < cmdlen) {
3832 ret = -ENOMSG;
3833 goto fail_host_msg;
3834 }
3835
3836 ret = i->f->bsg_request(job); 3658 ret = i->f->bsg_request(job);
3837 if (!ret) 3659 if (!ret)
3838 return FC_DISPATCH_UNLOCKED; 3660 return 0;
3839 3661
3840fail_host_msg: 3662fail_host_msg:
3841 /* return the errno failure code as the only status */ 3663 /* return the errno failure code as the only status */
3842 BUG_ON(job->reply_len < sizeof(uint32_t)); 3664 BUG_ON(job->reply_len < sizeof(uint32_t));
3843 job->reply->reply_payload_rcv_len = 0; 3665 bsg_reply->reply_payload_rcv_len = 0;
3844 job->reply->result = ret; 3666 bsg_reply->result = ret;
3845 job->reply_len = sizeof(uint32_t); 3667 job->reply_len = sizeof(uint32_t);
3846 fc_bsg_jobdone(job); 3668 bsg_job_done(job, bsg_reply->result,
3847 return FC_DISPATCH_UNLOCKED; 3669 bsg_reply->reply_payload_rcv_len);
3670 return 0;
3848} 3671}
3849 3672
3850 3673
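
[annotation] Two details of the dispatcher conversion are worth calling out. The request_len check is hoisted ahead of the msgcode switch because the switch itself reads bsg_request->msgcode; validating before that read means a too-short request is rejected before any of its payload is inspected. And on any validation failure the job is still completed normally, with the negative errno carried in the reply, a pattern shared by the rport dispatcher below:

    fail_host_msg:
            /* return the errno failure code as the only status */
            bsg_reply->reply_payload_rcv_len = 0;
            bsg_reply->result = ret;                /* negative errno */
            job->reply_len = sizeof(uint32_t);      /* just the result word */
            bsg_job_done(job, bsg_reply->result,
                         bsg_reply->reply_payload_rcv_len);
            return 0;                               /* request was consumed */
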
@@ -3855,34 +3678,38 @@ fail_host_msg:
3855static void 3678static void
3856fc_bsg_goose_queue(struct fc_rport *rport) 3679fc_bsg_goose_queue(struct fc_rport *rport)
3857{ 3680{
3858 if (!rport->rqst_q) 3681 struct request_queue *q = rport->rqst_q;
3682 unsigned long flags;
3683
3684 if (!q)
3859 return; 3685 return;
3860 3686
3861 /* 3687 spin_lock_irqsave(q->queue_lock, flags);
3862 * This get/put dance makes no sense 3688 blk_run_queue_async(q);
3863 */ 3689 spin_unlock_irqrestore(q->queue_lock, flags);
3864 get_device(&rport->dev);
3865 blk_run_queue_async(rport->rqst_q);
3866 put_device(&rport->dev);
3867} 3690}
3868 3691
3869/** 3692/**
3870 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD 3693 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
3871 * @q: rport request queue
3872 * @shost: scsi host rport attached to 3694 * @shost: scsi host rport attached to
3873 * @rport: rport request destined to
3874 * @job: bsg job to be processed 3695 * @job: bsg job to be processed
3875 */ 3696 */
3876static enum fc_dispatch_result 3697static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
3877fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3878 struct fc_rport *rport, struct fc_bsg_job *job)
3879{ 3698{
3880 struct fc_internal *i = to_fc_internal(shost->transportt); 3699 struct fc_internal *i = to_fc_internal(shost->transportt);
3700 struct fc_bsg_request *bsg_request = job->request;
3701 struct fc_bsg_reply *bsg_reply = job->reply;
3881 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ 3702 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
3882 int ret; 3703 int ret;
3883 3704
3705 /* check if we really have all the request data needed */
3706 if (job->request_len < cmdlen) {
3707 ret = -ENOMSG;
3708 goto fail_rport_msg;
3709 }
3710
3884 /* Validate the rport command */ 3711 /* Validate the rport command */
3885 switch (job->request->msgcode) { 3712 switch (bsg_request->msgcode) {
3886 case FC_BSG_RPT_ELS: 3713 case FC_BSG_RPT_ELS:
3887 cmdlen += sizeof(struct fc_bsg_rport_els); 3714 cmdlen += sizeof(struct fc_bsg_rport_els);
3888 goto check_bidi; 3715 goto check_bidi;
@@ -3902,133 +3729,31 @@ check_bidi:
3902 goto fail_rport_msg; 3729 goto fail_rport_msg;
3903 } 3730 }
3904 3731
3905 /* check if we really have all the request data needed */
3906 if (job->request_len < cmdlen) {
3907 ret = -ENOMSG;
3908 goto fail_rport_msg;
3909 }
3910
3911 ret = i->f->bsg_request(job); 3732 ret = i->f->bsg_request(job);
3912 if (!ret) 3733 if (!ret)
3913 return FC_DISPATCH_UNLOCKED; 3734 return 0;
3914 3735
3915fail_rport_msg: 3736fail_rport_msg:
3916 /* return the errno failure code as the only status */ 3737 /* return the errno failure code as the only status */
3917 BUG_ON(job->reply_len < sizeof(uint32_t)); 3738 BUG_ON(job->reply_len < sizeof(uint32_t));
3918 job->reply->reply_payload_rcv_len = 0; 3739 bsg_reply->reply_payload_rcv_len = 0;
3919 job->reply->result = ret; 3740 bsg_reply->result = ret;
3920 job->reply_len = sizeof(uint32_t); 3741 job->reply_len = sizeof(uint32_t);
3921 fc_bsg_jobdone(job); 3742 bsg_job_done(job, bsg_reply->result,
3922 return FC_DISPATCH_UNLOCKED; 3743 bsg_reply->reply_payload_rcv_len);
3923} 3744 return 0;
3924
3925
3926/**
3927 * fc_bsg_request_handler - generic handler for bsg requests
3928 * @q: request queue to manage
3929 * @shost: Scsi_Host related to the bsg object
3930 * @rport: FC remote port related to the bsg object (optional)
3931 * @dev: device structure for bsg object
3932 */
3933static void
3934fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3935 struct fc_rport *rport, struct device *dev)
3936{
3937 struct request *req;
3938 struct fc_bsg_job *job;
3939 enum fc_dispatch_result ret;
3940
3941 if (!get_device(dev))
3942 return;
3943
3944 while (1) {
3945 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
3946 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
3947 break;
3948
3949 req = blk_fetch_request(q);
3950 if (!req)
3951 break;
3952
3953 if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
3954 req->errors = -ENXIO;
3955 spin_unlock_irq(q->queue_lock);
3956 blk_end_request_all(req, -ENXIO);
3957 spin_lock_irq(q->queue_lock);
3958 continue;
3959 }
3960
3961 spin_unlock_irq(q->queue_lock);
3962
3963 ret = fc_req_to_bsgjob(shost, rport, req);
3964 if (ret) {
3965 req->errors = ret;
3966 blk_end_request_all(req, ret);
3967 spin_lock_irq(q->queue_lock);
3968 continue;
3969 }
3970
3971 job = req->special;
3972
3973 /* check if we have the msgcode value at least */
3974 if (job->request_len < sizeof(uint32_t)) {
3975 BUG_ON(job->reply_len < sizeof(uint32_t));
3976 job->reply->reply_payload_rcv_len = 0;
3977 job->reply->result = -ENOMSG;
3978 job->reply_len = sizeof(uint32_t);
3979 fc_bsg_jobdone(job);
3980 spin_lock_irq(q->queue_lock);
3981 continue;
3982 }
3983
3984 /* the dispatch routines will unlock the queue_lock */
3985 if (rport)
3986 ret = fc_bsg_rport_dispatch(q, shost, rport, job);
3987 else
3988 ret = fc_bsg_host_dispatch(q, shost, job);
3989
3990 /* did the dispatcher hit a state that can't process any more? */
3991 if (ret == FC_DISPATCH_BREAK)
3992 break;
3993
3994 /* did the dispatcher release the lock? */
3995 if (ret == FC_DISPATCH_UNLOCKED)
3996 spin_lock_irq(q->queue_lock);
3997 }
3998
3999 spin_unlock_irq(q->queue_lock);
4000 put_device(dev);
4001 spin_lock_irq(q->queue_lock);
4002}
4003
4004
4005/**
4006 * fc_bsg_host_handler - handler for bsg requests for a fc host
4007 * @q: fc host request queue
4008 */
4009static void
4010fc_bsg_host_handler(struct request_queue *q)
4011{
4012 struct Scsi_Host *shost = q->queuedata;
4013
4014 fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
4015} 3745}
4016 3746
4017 3747static int fc_bsg_dispatch(struct bsg_job *job)
4018/**
4019 * fc_bsg_rport_handler - handler for bsg requests for a fc rport
4020 * @q: rport request queue
4021 */
4022static void
4023fc_bsg_rport_handler(struct request_queue *q)
4024{ 3748{
4025 struct fc_rport *rport = q->queuedata; 3749 struct Scsi_Host *shost = fc_bsg_to_shost(job);
4026 struct Scsi_Host *shost = rport_to_shost(rport);
4027 3750
4028 fc_bsg_request_handler(q, shost, rport, &rport->dev); 3751 if (scsi_is_fc_rport(job->dev))
3752 return fc_bsg_rport_dispatch(shost, job);
3753 else
3754 return fc_bsg_host_dispatch(shost, job);
4029} 3755}
4030 3756
4031
4032/** 3757/**
4033 * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests 3758 * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
4034 * @shost: shost for fc_host 3759 * @shost: shost for fc_host
@@ -4051,33 +3776,42 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
4051 snprintf(bsg_name, sizeof(bsg_name), 3776 snprintf(bsg_name, sizeof(bsg_name),
4052 "fc_host%d", shost->host_no); 3777 "fc_host%d", shost->host_no);
4053 3778
4054 q = __scsi_alloc_queue(shost, fc_bsg_host_handler); 3779 q = __scsi_alloc_queue(shost, bsg_request_fn);
4055 if (!q) { 3780 if (!q) {
4056 printk(KERN_ERR "fc_host%d: bsg interface failed to " 3781 dev_err(dev,
4057 "initialize - no request queue\n", 3782 "fc_host%d: bsg interface failed to initialize - no request queue\n",
4058 shost->host_no); 3783 shost->host_no);
4059 return -ENOMEM; 3784 return -ENOMEM;
4060 } 3785 }
4061 3786
4062 q->queuedata = shost; 3787 err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch,
4063 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); 3788 i->f->dd_bsg_size);
4064 blk_queue_softirq_done(q, fc_bsg_softirq_done);
4065 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
4066 blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
4067
4068 err = bsg_register_queue(q, dev, bsg_name, NULL);
4069 if (err) { 3789 if (err) {
4070 printk(KERN_ERR "fc_host%d: bsg interface failed to " 3790 dev_err(dev,
4071 "initialize - register queue\n", 3791 "fc_host%d: bsg interface failed to initialize - setup queue\n",
4072 shost->host_no); 3792 shost->host_no);
4073 blk_cleanup_queue(q); 3793 blk_cleanup_queue(q);
4074 return err; 3794 return err;
4075 } 3795 }
4076 3796 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3797 blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
4077 fc_host->rqst_q = q; 3798 fc_host->rqst_q = q;
4078 return 0; 3799 return 0;
4079} 3800}
4080 3801
3802static int fc_bsg_rport_prep(struct request_queue *q, struct request *req)
3803{
3804 struct fc_rport *rport = dev_to_rport(q->queuedata);
3805
3806 if (rport->port_state == FC_PORTSTATE_BLOCKED &&
3807 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
3808 return BLKPREP_DEFER;
3809
3810 if (rport->port_state != FC_PORTSTATE_ONLINE)
3811 return BLKPREP_KILL;
3812
3813 return BLKPREP_OK;
3814}
4081 3815
4082/** 3816/**
4083 * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests 3817 * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
@@ -4097,29 +3831,22 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
4097 if (!i->f->bsg_request) 3831 if (!i->f->bsg_request)
4098 return -ENOTSUPP; 3832 return -ENOTSUPP;
4099 3833
4100 q = __scsi_alloc_queue(shost, fc_bsg_rport_handler); 3834 q = __scsi_alloc_queue(shost, bsg_request_fn);
4101 if (!q) { 3835 if (!q) {
4102 printk(KERN_ERR "%s: bsg interface failed to " 3836 dev_err(dev, "bsg interface failed to initialize - no request queue\n");
4103 "initialize - no request queue\n",
4104 dev->kobj.name);
4105 return -ENOMEM; 3837 return -ENOMEM;
4106 } 3838 }
4107 3839
4108 q->queuedata = rport; 3840 err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
4109 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
4110 blk_queue_softirq_done(q, fc_bsg_softirq_done);
4111 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
4112 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
4113
4114 err = bsg_register_queue(q, dev, NULL, NULL);
4115 if (err) { 3841 if (err) {
4116 printk(KERN_ERR "%s: bsg interface failed to " 3842 dev_err(dev, "failed to setup bsg queue\n");
4117 "initialize - register queue\n",
4118 dev->kobj.name);
4119 blk_cleanup_queue(q); 3843 blk_cleanup_queue(q);
4120 return err; 3844 return err;
4121 } 3845 }
4122 3846
3847 blk_queue_prep_rq(q, fc_bsg_rport_prep);
3848 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3849 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
4123 rport->rqst_q = q; 3850 rport->rqst_q = q;
4124 return 0; 3851 return 0;
4125} 3852}
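
The scsi_transport_fc.c hunks above all serve one conversion: the transport's private fc_bsg_job machinery, hand-rolled request_fn and FC_DISPATCH_* return codes are replaced by the shared bsg-lib helpers, so dispatch callbacks now take a struct bsg_job and complete it with bsg_job_done(). A minimal sketch of the resulting pattern, assuming the 4.10-era bsg-lib API; example_dispatch, example_bsg_add and example_do_io are illustrative names, not part of the patch:

	#include <linux/blkdev.h>
	#include <linux/bsg-lib.h>
	#include <scsi/scsi_host.h>
	#include <scsi/scsi_transport_fc.h>

	/* bsg-lib hands the LLD a fully formed bsg_job; no request parsing. */
	static int example_dispatch(struct bsg_job *job)
	{
		struct fc_bsg_reply *bsg_reply = job->reply;
		int ret = example_do_io(job);	/* hypothetical hardware hook */

		if (ret) {
			/* Errors travel through the reply, then complete. */
			bsg_reply->reply_payload_rcv_len = 0;
			bsg_reply->result = ret;
			job->reply_len = sizeof(uint32_t);
			bsg_job_done(job, bsg_reply->result,
				     bsg_reply->reply_payload_rcv_len);
		}
		return 0;	/* success completes asynchronously */
	}

	static int example_bsg_add(struct Scsi_Host *shost, struct device *dev)
	{
		struct request_queue *q;
		int err;

		/* bsg-lib's bsg_request_fn replaces the private request_fn. */
		q = __scsi_alloc_queue(shost, bsg_request_fn);
		if (!q)
			return -ENOMEM;

		/* Register the queue with bsg and wire up the dispatch hook. */
		err = bsg_setup_queue(dev, q, "example_bsg", example_dispatch, 0);
		if (err)
			blk_cleanup_queue(q);
		return err;
	}

Blocked-rport handling, which the removed fc_bsg_request_handler() loop did by hand, moves into the fc_bsg_rport_prep() callback shown above: BLKPREP_DEFER parks requests while the rport is blocked, BLKPREP_KILL fails them once it is gone.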
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index e3cd3ece4412..b87a78673f65 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -24,7 +24,6 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/delay.h>
28 27
29#include <scsi/scsi.h> 28#include <scsi/scsi.h>
30#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
@@ -115,21 +114,12 @@ static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
115static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports", 114static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
116 NULL, NULL, NULL); 115 NULL, NULL, NULL);
117 116
118#define SRP_PID(p) \
119 (p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
120 (p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
121 (p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
122 (p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]
123
124#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
125 "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
126
127static ssize_t 117static ssize_t
128show_srp_rport_id(struct device *dev, struct device_attribute *attr, 118show_srp_rport_id(struct device *dev, struct device_attribute *attr,
129 char *buf) 119 char *buf)
130{ 120{
131 struct srp_rport *rport = transport_class_to_srp_rport(dev); 121 struct srp_rport *rport = transport_class_to_srp_rport(dev);
132 return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport)); 122 return sprintf(buf, "%16phC\n", rport->port_id);
133} 123}
134 124
135static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL); 125static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);
@@ -402,36 +392,6 @@ static void srp_reconnect_work(struct work_struct *work)
402 } 392 }
403} 393}
404 394
405/**
406 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
407 * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
408 *
409 * To do: add support for scsi-mq in this function.
410 */
411static int scsi_request_fn_active(struct Scsi_Host *shost)
412{
413 struct scsi_device *sdev;
414 struct request_queue *q;
415 int request_fn_active = 0;
416
417 shost_for_each_device(sdev, shost) {
418 q = sdev->request_queue;
419
420 spin_lock_irq(q->queue_lock);
421 request_fn_active += q->request_fn_active;
422 spin_unlock_irq(q->queue_lock);
423 }
424
425 return request_fn_active;
426}
427
428/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
429static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
430{
431 while (scsi_request_fn_active(shost))
432 msleep(20);
433}
434
435static void __rport_fail_io_fast(struct srp_rport *rport) 395static void __rport_fail_io_fast(struct srp_rport *rport)
436{ 396{
437 struct Scsi_Host *shost = rport_to_shost(rport); 397 struct Scsi_Host *shost = rport_to_shost(rport);
@@ -441,14 +401,17 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
441 401
442 if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) 402 if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
443 return; 403 return;
404 /*
405 * Call scsi_target_block() to wait for ongoing shost->queuecommand()
406 * calls before invoking i->f->terminate_rport_io().
407 */
408 scsi_target_block(rport->dev.parent);
444 scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); 409 scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
445 410
446 /* Involve the LLD if possible to terminate all I/O on the rport. */ 411 /* Involve the LLD if possible to terminate all I/O on the rport. */
447 i = to_srp_internal(shost->transportt); 412 i = to_srp_internal(shost->transportt);
448 if (i->f->terminate_rport_io) { 413 if (i->f->terminate_rport_io)
449 srp_wait_for_queuecommand(shost);
450 i->f->terminate_rport_io(rport); 414 i->f->terminate_rport_io(rport);
451 }
452} 415}
453 416
454/** 417/**
@@ -576,7 +539,6 @@ int srp_reconnect_rport(struct srp_rport *rport)
576 if (res) 539 if (res)
577 goto out; 540 goto out;
578 scsi_target_block(&shost->shost_gendev); 541 scsi_target_block(&shost->shost_gendev);
579 srp_wait_for_queuecommand(shost);
580 res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; 542 res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
581 pr_debug("%s (state %d): transport.reconnect() returned %d\n", 543 pr_debug("%s (state %d): transport.reconnect() returned %d\n",
582 dev_name(&shost->shost_gendev), rport->state, res); 544 dev_name(&shost->shost_gendev), rport->state, res);
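
Both removals in this file hang off one fact, stated in the new comment above: scsi_target_block() both stops new commands and waits for ongoing shost->queuecommand() calls before returning, on either I/O path. That makes the request_fn_active polling loop (which srp_wait_for_queuecommand() implemented and which never learned scsi-mq) unnecessary. The resulting quiesce/act/resume shape, sketched with a hypothetical do_reconnect() standing in for the transport hook:

	scsi_target_block(&shost->shost_gendev);	/* drains queuecommand() */
	res = do_reconnect(rport);			/* safe: no I/O in flight */
	scsi_target_unblock(&shost->shost_gendev,
			    res == 0 ? SDEV_RUNNING : SDEV_TRANSPORT_OFFLINE);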
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 079c2d9759fb..1622e23138e0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2465,9 +2465,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2465 if (sdkp->first_scan || old_wp != sdkp->write_prot) { 2465 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2466 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", 2466 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2467 sdkp->write_prot ? "on" : "off"); 2467 sdkp->write_prot ? "on" : "off");
2468 sd_printk(KERN_DEBUG, sdkp, 2468 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
2469 "Mode Sense: %02x %02x %02x %02x\n",
2470 buffer[0], buffer[1], buffer[2], buffer[3]);
2471 } 2469 }
2472 } 2470 }
2473} 2471}
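
This sd.c change and the srp port_id change above both lean on the kernel's %*ph printf extension, which prints small buffers as hex and replaces hand-maintained per-byte format strings (the dropped SRP_PID_FMT macro, sixteen "%02x" specifiers long, being a good example of why). The variants, with illustrative buffers:

	u8 sense[4] = { 0x00, 0x1a, 0x00, 0x00 };
	u8 port_id[16] = { };

	pr_debug("Mode Sense: %4ph\n", sense);	/* space-separated: 00 1a 00 00 */
	pr_debug("port_id: %16phC\n", port_id);	/* colon-separated, as in srp  */
	pr_debug("raw: %4phN\n", sense);	/* no separator: 001a0000      */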
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07b6444d3e0a..b673825f46b5 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -929,8 +929,6 @@ struct pqi_ctrl_info {
929 int max_msix_vectors; 929 int max_msix_vectors;
930 int num_msix_vectors_enabled; 930 int num_msix_vectors_enabled;
931 int num_msix_vectors_initialized; 931 int num_msix_vectors_initialized;
932 u32 msix_vectors[PQI_MAX_MSIX_VECTORS];
933 void *intr_data[PQI_MAX_MSIX_VECTORS];
934 int event_irq; 932 int event_irq;
935 struct Scsi_Host *scsi_host; 933 struct Scsi_Host *scsi_host;
936 934
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a535b2661f38..8702d9cf8040 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -25,6 +25,7 @@
25#include <linux/rtc.h> 25#include <linux/rtc.h>
26#include <linux/bcd.h> 26#include <linux/bcd.h>
27#include <linux/cciss_ioctl.h> 27#include <linux/cciss_ioctl.h>
28#include <linux/blk-mq-pci.h>
28#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h> 30#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
@@ -2887,19 +2888,19 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
2887 2888
2888static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 2889static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2889{ 2890{
2891 struct pci_dev *pdev = ctrl_info->pci_dev;
2890 int i; 2892 int i;
2891 int rc; 2893 int rc;
2892 2894
2893 ctrl_info->event_irq = ctrl_info->msix_vectors[0]; 2895 ctrl_info->event_irq = pci_irq_vector(pdev, 0);
2894 2896
2895 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 2897 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2896 rc = request_irq(ctrl_info->msix_vectors[i], 2898 rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
2897 pqi_irq_handler, 0, 2899 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
2898 DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
2899 if (rc) { 2900 if (rc) {
2900 dev_err(&ctrl_info->pci_dev->dev, 2901 dev_err(&pdev->dev,
2901 "irq %u init failed with error %d\n", 2902 "irq %u init failed with error %d\n",
2902 ctrl_info->msix_vectors[i], rc); 2903 pci_irq_vector(pdev, i), rc);
2903 return rc; 2904 return rc;
2904 } 2905 }
2905 ctrl_info->num_msix_vectors_initialized++; 2906 ctrl_info->num_msix_vectors_initialized++;
@@ -2908,72 +2909,23 @@ static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2908 return 0; 2909 return 0;
2909} 2910}
2910 2911
2911static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
2912{
2913 int i;
2914
2915 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2916 free_irq(ctrl_info->msix_vectors[i],
2917 ctrl_info->intr_data[i]);
2918}
2919
2920static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 2912static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2921{ 2913{
2922 unsigned int i; 2914 int ret;
2923 int max_vectors;
2924 int num_vectors_enabled;
2925 struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
2926
2927 max_vectors = ctrl_info->num_queue_groups;
2928
2929 for (i = 0; i < max_vectors; i++)
2930 msix_entries[i].entry = i;
2931
2932 num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
2933 msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
2934 2915
2935 if (num_vectors_enabled < 0) { 2916 ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
2917 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
2918 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
2919 if (ret < 0) {
2936 dev_err(&ctrl_info->pci_dev->dev, 2920 dev_err(&ctrl_info->pci_dev->dev,
2937 "MSI-X init failed with error %d\n", 2921 "MSI-X init failed with error %d\n", ret);
2938 num_vectors_enabled); 2922 return ret;
2939 return num_vectors_enabled;
2940 }
2941
2942 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
2943 for (i = 0; i < num_vectors_enabled; i++) {
2944 ctrl_info->msix_vectors[i] = msix_entries[i].vector;
2945 ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
2946 } 2923 }
2947 2924
2925 ctrl_info->num_msix_vectors_enabled = ret;
2948 return 0; 2926 return 0;
2949} 2927}
2950 2928
2951static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2952{
2953 int i;
2954 int rc;
2955 int cpu;
2956
2957 cpu = cpumask_first(cpu_online_mask);
2958 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
2959 rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
2960 get_cpu_mask(cpu));
2961 if (rc)
2962 dev_err(&ctrl_info->pci_dev->dev,
2963 "error %d setting affinity hint for irq vector %u\n",
2964 rc, ctrl_info->msix_vectors[i]);
2965 cpu = cpumask_next(cpu, cpu_online_mask);
2966 }
2967}
2968
2969static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2970{
2971 int i;
2972
2973 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2974 irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
2975}
2976
2977static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 2929static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
2978{ 2930{
2979 unsigned int i; 2931 unsigned int i;
@@ -4743,6 +4695,13 @@ static int pqi_slave_configure(struct scsi_device *sdev)
4743 return 0; 4695 return 0;
4744} 4696}
4745 4697
4698static int pqi_map_queues(struct Scsi_Host *shost)
4699{
4700 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
4701
4702 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
4703}
4704
4746static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, 4705static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4747 void __user *arg) 4706 void __user *arg)
4748{ 4707{
@@ -5130,6 +5089,7 @@ static struct scsi_host_template pqi_driver_template = {
5130 .ioctl = pqi_ioctl, 5089 .ioctl = pqi_ioctl,
5131 .slave_alloc = pqi_slave_alloc, 5090 .slave_alloc = pqi_slave_alloc,
5132 .slave_configure = pqi_slave_configure, 5091 .slave_configure = pqi_slave_configure,
5092 .map_queues = pqi_map_queues,
5133 .sdev_attrs = pqi_sdev_attrs, 5093 .sdev_attrs = pqi_sdev_attrs,
5134 .shost_attrs = pqi_shost_attrs, 5094 .shost_attrs = pqi_shost_attrs,
5135}; 5095};
@@ -5159,7 +5119,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5159 shost->cmd_per_lun = shost->can_queue; 5119 shost->cmd_per_lun = shost->can_queue;
5160 shost->sg_tablesize = ctrl_info->sg_tablesize; 5120 shost->sg_tablesize = ctrl_info->sg_tablesize;
5161 shost->transportt = pqi_sas_transport_template; 5121 shost->transportt = pqi_sas_transport_template;
5162 shost->irq = ctrl_info->msix_vectors[0]; 5122 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
5163 shost->unique_id = shost->irq; 5123 shost->unique_id = shost->irq;
5164 shost->nr_hw_queues = ctrl_info->num_queue_groups; 5124 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5165 shost->hostdata[0] = (unsigned long)ctrl_info; 5125 shost->hostdata[0] = (unsigned long)ctrl_info;
@@ -5409,8 +5369,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5409 if (rc) 5369 if (rc)
5410 return rc; 5370 return rc;
5411 5371
5412 pqi_irq_set_affinity_hint(ctrl_info);
5413
5414 rc = pqi_create_queues(ctrl_info); 5372 rc = pqi_create_queues(ctrl_info);
5415 if (rc) 5373 if (rc)
5416 return rc; 5374 return rc;
@@ -5557,10 +5515,14 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5557 5515
5558static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 5516static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5559{ 5517{
5560 pqi_irq_unset_affinity_hint(ctrl_info); 5518 int i;
5561 pqi_free_irqs(ctrl_info); 5519
5562 if (ctrl_info->num_msix_vectors_enabled) 5520 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
5563 pci_disable_msix(ctrl_info->pci_dev); 5521 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
5522 &ctrl_info->queue_groups[i]);
5523 }
5524
5525 pci_free_irq_vectors(ctrl_info->pci_dev);
5564} 5526}
5565 5527
5566static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 5528static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
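
This is the pci_alloc_irq_vectors() conversion the merge description calls out, in its general shape: one call replaces the msix_entry array, pci_enable_msix_range() and the manual irq_set_affinity_hint() loop (PCI_IRQ_AFFINITY spreads vectors across CPUs), while pci_irq_vector() translates a vector index into a Linux irq number on demand. A condensed sketch with example_* placeholders; real drivers such as smartpqi pass per-queue data to request_irq() rather than one shared ctx:

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	static int example_setup_irqs(struct pci_dev *pdev, int max_queues,
				      irq_handler_t handler, void *ctx)
	{
		int nvecs, i, rc;

		nvecs = pci_alloc_irq_vectors(pdev, 1, max_queues,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (nvecs < 0)
			return nvecs;

		for (i = 0; i < nvecs; i++) {
			rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
					 "example", ctx);
			if (rc)
				goto out_free;
		}
		return nvecs;

	out_free:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), ctx);
		pci_free_irq_vectors(pdev);
		return rc;
	}

The affinity masks chosen by PCI_IRQ_AFFINITY are also what the new pqi_map_queues() feeds to blk-mq through blk_mq_pci_map_queues(), so hardware queues land on the CPUs whose vectors interrupt them.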
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8ccfc9ea874b..05526b71541b 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1495,9 +1495,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
1495 if (sg_count) { 1495 if (sg_count) {
1496 if (sg_count > MAX_PAGE_BUFFER_COUNT) { 1496 if (sg_count > MAX_PAGE_BUFFER_COUNT) {
1497 1497
1498 payload_sz = (sg_count * sizeof(void *) + 1498 payload_sz = (sg_count * sizeof(u64) +
1499 sizeof(struct vmbus_packet_mpb_array)); 1499 sizeof(struct vmbus_packet_mpb_array));
1500 payload = kmalloc(payload_sz, GFP_ATOMIC); 1500 payload = kzalloc(payload_sz, GFP_ATOMIC);
1501 if (!payload) 1501 if (!payload)
1502 return SCSI_MLQUEUE_DEVICE_BUSY; 1502 return SCSI_MLQUEUE_DEVICE_BUSY;
1503 } 1503 }
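
Two fixes share this hunk. The multi-page buffer array that follows struct vmbus_packet_mpb_array carries 64-bit page frame numbers, so the allocation must be sized with sizeof(u64) rather than the guest pointer width (the two only coincide on 64-bit builds), and switching to kzalloc() keeps uninitialized kernel heap in any unused slots from being handed to the host. The reasoning in isolation:

	/* An array of u64 PFNs follows the header; size it by the wire format. */
	payload_sz = sizeof(struct vmbus_packet_mpb_array) +
		     sg_count * sizeof(u64);
	payload = kzalloc(payload_sz, GFP_ATOMIC);	/* zeroed before crossing VMBus */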
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 3c4c07038948..88db6992420e 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -43,20 +43,18 @@
43 43
44#define NCR5380_implementation_fields /* none */ 44#define NCR5380_implementation_fields /* none */
45 45
46#define NCR5380_read(reg) sun3scsi_read(reg) 46#define NCR5380_read(reg) in_8(hostdata->io + (reg))
47#define NCR5380_write(reg, value) sun3scsi_write(reg, value) 47#define NCR5380_write(reg, value) out_8(hostdata->io + (reg), value)
48 48
49#define NCR5380_queue_command sun3scsi_queue_command 49#define NCR5380_queue_command sun3scsi_queue_command
50#define NCR5380_bus_reset sun3scsi_bus_reset 50#define NCR5380_bus_reset sun3scsi_bus_reset
51#define NCR5380_abort sun3scsi_abort 51#define NCR5380_abort sun3scsi_abort
52#define NCR5380_info sun3scsi_info 52#define NCR5380_info sun3scsi_info
53 53
54#define NCR5380_dma_recv_setup(instance, data, count) (count) 54#define NCR5380_dma_xfer_len sun3scsi_dma_xfer_len
55#define NCR5380_dma_send_setup(instance, data, count) (count) 55#define NCR5380_dma_recv_setup sun3scsi_dma_count
56#define NCR5380_dma_residual(instance) \ 56#define NCR5380_dma_send_setup sun3scsi_dma_count
57 sun3scsi_dma_residual(instance) 57#define NCR5380_dma_residual sun3scsi_dma_residual
58#define NCR5380_dma_xfer_len(instance, cmd, phase) \
59 sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd)
60 58
61#define NCR5380_acquire_dma_irq(instance) (1) 59#define NCR5380_acquire_dma_irq(instance) (1)
62#define NCR5380_release_dma_irq(instance) 60#define NCR5380_release_dma_irq(instance)
@@ -82,7 +80,6 @@ module_param(setup_hostid, int, 0);
82#define SUN3_DVMA_BUFSIZE 0xe000 80#define SUN3_DVMA_BUFSIZE 0xe000
83 81
84static struct scsi_cmnd *sun3_dma_setup_done; 82static struct scsi_cmnd *sun3_dma_setup_done;
85static unsigned char *sun3_scsi_regp;
86static volatile struct sun3_dma_regs *dregs; 83static volatile struct sun3_dma_regs *dregs;
87static struct sun3_udc_regs *udc_regs; 84static struct sun3_udc_regs *udc_regs;
88static unsigned char *sun3_dma_orig_addr; 85static unsigned char *sun3_dma_orig_addr;
@@ -90,20 +87,6 @@ static unsigned long sun3_dma_orig_count;
90static int sun3_dma_active; 87static int sun3_dma_active;
91static unsigned long last_residual; 88static unsigned long last_residual;
92 89
93/*
94 * NCR 5380 register access functions
95 */
96
97static inline unsigned char sun3scsi_read(int reg)
98{
99 return in_8(sun3_scsi_regp + reg);
100}
101
102static inline void sun3scsi_write(int reg, int value)
103{
104 out_8(sun3_scsi_regp + reg, value);
105}
106
107#ifndef SUN3_SCSI_VME 90#ifndef SUN3_SCSI_VME
108/* dma controller register access functions */ 91/* dma controller register access functions */
109 92
@@ -158,8 +141,8 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dev)
158} 141}
159 142
160/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ 143/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
161static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance, 144static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata,
162 void *data, unsigned long count, int write_flag) 145 unsigned char *data, int count, int write_flag)
163{ 146{
164 void *addr; 147 void *addr;
165 148
@@ -211,9 +194,10 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
211 dregs->csr |= CSR_FIFO; 194 dregs->csr |= CSR_FIFO;
212 195
213 if(dregs->fifo_count != count) { 196 if(dregs->fifo_count != count) {
214 shost_printk(KERN_ERR, instance, "FIFO mismatch %04x not %04x\n", 197 shost_printk(KERN_ERR, hostdata->host,
198 "FIFO mismatch %04x not %04x\n",
215 dregs->fifo_count, (unsigned int) count); 199 dregs->fifo_count, (unsigned int) count);
216 NCR5380_dprint(NDEBUG_DMA, instance); 200 NCR5380_dprint(NDEBUG_DMA, hostdata->host);
217 } 201 }
218 202
219 /* setup udc */ 203 /* setup udc */
@@ -248,14 +232,34 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
248 232
249} 233}
250 234
251static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) 235static int sun3scsi_dma_count(struct NCR5380_hostdata *hostdata,
236 unsigned char *data, int count)
237{
238 return count;
239}
240
241static inline int sun3scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
242 unsigned char *data, int count)
243{
244 return sun3scsi_dma_setup(hostdata, data, count, 0);
245}
246
247static inline int sun3scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
248 unsigned char *data, int count)
249{
250 return sun3scsi_dma_setup(hostdata, data, count, 1);
251}
252
253static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata)
252{ 254{
253 return last_residual; 255 return last_residual;
254} 256}
255 257
256static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len, 258static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
257 struct scsi_cmnd *cmd) 259 struct scsi_cmnd *cmd)
258{ 260{
261 int wanted_len = cmd->SCp.this_residual;
262
259 if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS) 263 if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
260 return 0; 264 return 0;
261 265
@@ -428,9 +432,10 @@ static struct scsi_host_template sun3_scsi_template = {
428static int __init sun3_scsi_probe(struct platform_device *pdev) 432static int __init sun3_scsi_probe(struct platform_device *pdev)
429{ 433{
430 struct Scsi_Host *instance; 434 struct Scsi_Host *instance;
435 struct NCR5380_hostdata *hostdata;
431 int error; 436 int error;
432 struct resource *irq, *mem; 437 struct resource *irq, *mem;
433 unsigned char *ioaddr; 438 void __iomem *ioaddr;
434 int host_flags = 0; 439 int host_flags = 0;
435#ifdef SUN3_SCSI_VME 440#ifdef SUN3_SCSI_VME
436 int i; 441 int i;
@@ -493,8 +498,6 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
493 } 498 }
494#endif 499#endif
495 500
496 sun3_scsi_regp = ioaddr;
497
498 instance = scsi_host_alloc(&sun3_scsi_template, 501 instance = scsi_host_alloc(&sun3_scsi_template,
499 sizeof(struct NCR5380_hostdata)); 502 sizeof(struct NCR5380_hostdata));
500 if (!instance) { 503 if (!instance) {
@@ -502,9 +505,12 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
502 goto fail_alloc; 505 goto fail_alloc;
503 } 506 }
504 507
505 instance->io_port = (unsigned long)ioaddr;
506 instance->irq = irq->start; 508 instance->irq = irq->start;
507 509
510 hostdata = shost_priv(instance);
511 hostdata->base = mem->start;
512 hostdata->io = ioaddr;
513
508 error = NCR5380_init(instance, host_flags); 514 error = NCR5380_init(instance, host_flags);
509 if (error) 515 if (error)
510 goto fail_init; 516 goto fail_init;
@@ -552,13 +558,15 @@ fail_init:
552fail_alloc: 558fail_alloc:
553 if (udc_regs) 559 if (udc_regs)
554 dvma_free(udc_regs); 560 dvma_free(udc_regs);
555 iounmap(sun3_scsi_regp); 561 iounmap(ioaddr);
556 return error; 562 return error;
557} 563}
558 564
559static int __exit sun3_scsi_remove(struct platform_device *pdev) 565static int __exit sun3_scsi_remove(struct platform_device *pdev)
560{ 566{
561 struct Scsi_Host *instance = platform_get_drvdata(pdev); 567 struct Scsi_Host *instance = platform_get_drvdata(pdev);
568 struct NCR5380_hostdata *hostdata = shost_priv(instance);
569 void __iomem *ioaddr = hostdata->io;
562 570
563 scsi_remove_host(instance); 571 scsi_remove_host(instance);
564 free_irq(instance->irq, instance); 572 free_irq(instance->irq, instance);
@@ -566,7 +574,7 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev)
566 scsi_host_put(instance); 574 scsi_host_put(instance);
567 if (udc_regs) 575 if (udc_regs)
568 dvma_free(udc_regs); 576 dvma_free(udc_regs);
569 iounmap(sun3_scsi_regp); 577 iounmap(ioaddr);
570 return 0; 578 return 0;
571} 579}
572 580
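
The sun3 conversion mirrors the NCR5380 core rework applied to the other 5380 boards in this series: the register-access macros now expand against the per-host hostdata->io instead of the file-scoped sun3_scsi_regp, so instance state stays with the instance. The allocation idiom it relies on, sketched:

	struct Scsi_Host *instance;
	struct NCR5380_hostdata *hostdata;

	/* Private data is carved out of the Scsi_Host allocation itself. */
	instance = scsi_host_alloc(&sun3_scsi_template,
				   sizeof(struct NCR5380_hostdata));
	hostdata = shost_priv(instance);	/* pointer to that private area */
	hostdata->io = ioaddr;		/* NCR5380_read/write expand against this */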
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3aedf73f1131..aa43bfea0d00 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1094,10 +1094,12 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
1094 * ufs_qcom_setup_clocks - enables/disable clocks 1094 * ufs_qcom_setup_clocks - enables/disable clocks
1095 * @hba: host controller instance 1095 * @hba: host controller instance
1096 * @on: If true, enable clocks else disable them. 1096 * @on: If true, enable clocks else disable them.
1097 * @status: PRE_CHANGE or POST_CHANGE notify
1097 * 1098 *
1098 * Returns 0 on success, non-zero on failure. 1099 * Returns 0 on success, non-zero on failure.
1099 */ 1100 */
1100static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) 1101static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
1102 enum ufs_notify_change_status status)
1101{ 1103{
1102 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 1104 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1103 int err; 1105 int err;
@@ -1111,18 +1113,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
1111 if (!host) 1113 if (!host)
1112 return 0; 1114 return 0;
1113 1115
1114 if (on) { 1116 if (on && (status == POST_CHANGE)) {
1115 err = ufs_qcom_phy_enable_iface_clk(host->generic_phy); 1117 phy_power_on(host->generic_phy);
1116 if (err)
1117 goto out;
1118 1118
1119 err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
1120 if (err) {
1121 dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
1122 __func__, err);
1123 ufs_qcom_phy_disable_iface_clk(host->generic_phy);
1124 goto out;
1125 }
1126 /* enable the device ref clock for HS mode*/ 1119 /* enable the device ref clock for HS mode*/
1127 if (ufshcd_is_hs_mode(&hba->pwr_info)) 1120 if (ufshcd_is_hs_mode(&hba->pwr_info))
1128 ufs_qcom_dev_ref_clk_ctrl(host, true); 1121 ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1130,14 +1123,15 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
1130 if (vote == host->bus_vote.min_bw_vote) 1123 if (vote == host->bus_vote.min_bw_vote)
1131 ufs_qcom_update_bus_bw_vote(host); 1124 ufs_qcom_update_bus_bw_vote(host);
1132 1125
1133 } else { 1126 } else if (!on && (status == PRE_CHANGE)) {
1134 1127 if (!ufs_qcom_is_link_active(hba)) {
1135 /* M-PHY RMMI interface clocks can be turned off */
1136 ufs_qcom_phy_disable_iface_clk(host->generic_phy);
1137 if (!ufs_qcom_is_link_active(hba))
1138 /* disable device ref_clk */ 1128 /* disable device ref_clk */
1139 ufs_qcom_dev_ref_clk_ctrl(host, false); 1129 ufs_qcom_dev_ref_clk_ctrl(host, false);
1140 1130
1131 /* powering off PHY during aggressive clk gating */
1132 phy_power_off(host->generic_phy);
1133 }
1134
1141 vote = host->bus_vote.min_bw_vote; 1135 vote = host->bus_vote.min_bw_vote;
1142 } 1136 }
1143 1137
@@ -1146,7 +1140,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
1146 dev_err(hba->dev, "%s: set bus vote failed %d\n", 1140 dev_err(hba->dev, "%s: set bus vote failed %d\n",
1147 __func__, err); 1141 __func__, err);
1148 1142
1149out:
1150 return err; 1143 return err;
1151} 1144}
1152 1145
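
The setup_clocks vop now fires on both edges of a clock transition, and the interesting edge differs by direction: the PHY may only be powered on once the clocks are already running (POST_CHANGE of an enable) and must be powered off while they still are (PRE_CHANGE of a disable). The control flow, reduced to its skeleton with the ref-clk and bus-vote details elided:

	/* status says which side of the clock transition we are on */
	if (on && status == POST_CHANGE)
		phy_power_on(host->generic_phy);	/* clocks already up */
	else if (!on && status == PRE_CHANGE)
		phy_power_off(host->generic_phy);	/* clocks still up */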
@@ -1204,12 +1197,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
1204 if (IS_ERR(host->generic_phy)) { 1197 if (IS_ERR(host->generic_phy)) {
1205 err = PTR_ERR(host->generic_phy); 1198 err = PTR_ERR(host->generic_phy);
1206 dev_err(dev, "%s: PHY get failed %d\n", __func__, err); 1199 dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
1207 goto out; 1200 goto out_variant_clear;
1208 } 1201 }
1209 1202
1210 err = ufs_qcom_bus_register(host); 1203 err = ufs_qcom_bus_register(host);
1211 if (err) 1204 if (err)
1212 goto out_host_free; 1205 goto out_variant_clear;
1213 1206
1214 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, 1207 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
1215 &host->hw_ver.minor, &host->hw_ver.step); 1208 &host->hw_ver.minor, &host->hw_ver.step);
@@ -1254,7 +1247,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
1254 ufs_qcom_set_caps(hba); 1247 ufs_qcom_set_caps(hba);
1255 ufs_qcom_advertise_quirks(hba); 1248 ufs_qcom_advertise_quirks(hba);
1256 1249
1257 ufs_qcom_setup_clocks(hba, true); 1250 ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
1258 1251
1259 if (hba->dev->id < MAX_UFS_QCOM_HOSTS) 1252 if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
1260 ufs_qcom_hosts[hba->dev->id] = host; 1253 ufs_qcom_hosts[hba->dev->id] = host;
@@ -1274,8 +1267,7 @@ out_disable_phy:
1274 phy_power_off(host->generic_phy); 1267 phy_power_off(host->generic_phy);
1275out_unregister_bus: 1268out_unregister_bus:
1276 phy_exit(host->generic_phy); 1269 phy_exit(host->generic_phy);
1277out_host_free: 1270out_variant_clear:
1278 devm_kfree(dev, host);
1279 ufshcd_set_variant(hba, NULL); 1271 ufshcd_set_variant(hba, NULL);
1280out: 1272out:
1281 return err; 1273 return err;
@@ -1287,6 +1279,7 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
1287 1279
1288 ufs_qcom_disable_lane_clks(host); 1280 ufs_qcom_disable_lane_clks(host);
1289 phy_power_off(host->generic_phy); 1281 phy_power_off(host->generic_phy);
1282 phy_exit(host->generic_phy);
1290} 1283}
1291 1284
1292static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, 1285static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 845b874e2977..8e6709a3fb6b 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -46,6 +46,7 @@
46#define QUERY_DESC_HDR_SIZE 2 46#define QUERY_DESC_HDR_SIZE 2
47#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ 47#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
48 (sizeof(struct utp_upiu_header))) 48 (sizeof(struct utp_upiu_header)))
49#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
49 50
50#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ 51#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
51 cpu_to_be32((byte3 << 24) | (byte2 << 16) |\ 52 cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -162,7 +163,7 @@ enum desc_header_offset {
162}; 163};
163 164
164enum ufs_desc_max_size { 165enum ufs_desc_max_size {
165 QUERY_DESC_DEVICE_MAX_SIZE = 0x1F, 166 QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
166 QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90, 167 QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
167 QUERY_DESC_UNIT_MAX_SIZE = 0x23, 168 QUERY_DESC_UNIT_MAX_SIZE = 0x23,
168 QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06, 169 QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
@@ -416,7 +417,7 @@ struct utp_cmd_rsp {
416 __be32 residual_transfer_count; 417 __be32 residual_transfer_count;
417 __be32 reserved[4]; 418 __be32 reserved[4];
418 __be16 sense_data_len; 419 __be16 sense_data_len;
419 u8 sense_data[18]; 420 u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
420}; 421};
421 422
422/** 423/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 22f881e9253a..f7983058f3f7 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -128,6 +128,13 @@ struct ufs_dev_fix {
128 */ 128 */
129#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6) 129#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
130 130
131/*
132 * Some UFS devices require host PA_TACTIVATE to be lower than device
133 * PA_TACTIVATE; enabling this quirk ensures this.
134 */
135#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
136
137
131struct ufs_hba; 138struct ufs_hba;
132void ufs_advertise_fixup_device(struct ufs_hba *hba); 139void ufs_advertise_fixup_device(struct ufs_hba *hba);
133 140
@@ -140,6 +147,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
140 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS), 147 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
141 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, 148 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
142 UFS_DEVICE_NO_FASTAUTO), 149 UFS_DEVICE_NO_FASTAUTO),
150 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
151 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
143 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL, 152 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
144 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), 153 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
145 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG", 154 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index d15eaa466c59..52b546fb509b 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
104 pm_runtime_forbid(&pdev->dev); 104 pm_runtime_forbid(&pdev->dev);
105 pm_runtime_get_noresume(&pdev->dev); 105 pm_runtime_get_noresume(&pdev->dev);
106 ufshcd_remove(hba); 106 ufshcd_remove(hba);
107 ufshcd_dealloc_host(hba);
107} 108}
108 109
109/** 110/**
@@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
147 err = ufshcd_init(hba, mmio_base, pdev->irq); 148 err = ufshcd_init(hba, mmio_base, pdev->irq);
148 if (err) { 149 if (err) {
149 dev_err(&pdev->dev, "Initialization failed\n"); 150 dev_err(&pdev->dev, "Initialization failed\n");
151 ufshcd_dealloc_host(hba);
150 return err; 152 return err;
151 } 153 }
152 154
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index db53f38da864..a72a4ba78125 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -163,7 +163,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
163 if (ret) { 163 if (ret) {
164 dev_err(dev, "%s: unable to find %s err %d\n", 164 dev_err(dev, "%s: unable to find %s err %d\n",
165 __func__, prop_name, ret); 165 __func__, prop_name, ret);
166 goto out_free; 166 goto out;
167 } 167 }
168 168
169 vreg->min_uA = 0; 169 vreg->min_uA = 0;
@@ -185,9 +185,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
185 185
186 goto out; 186 goto out;
187 187
188out_free:
189 devm_kfree(dev, vreg);
190 vreg = NULL;
191out: 188out:
192 if (!ret) 189 if (!ret)
193 *out_vreg = vreg; 190 *out_vreg = vreg;
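
The dropped out_free label here (like the out_host_free label in ufs-qcom above) is an instance of a general devm rule: memory from devm_kzalloc() is released automatically when the device is unbound, so an error path only needs to return, and freeing by hand is redundant at best. The idiom, with a placeholder condition:

	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
	if (!vreg)
		return -ENOMEM;

	if (property_missing)	/* placeholder for the real lookup failure */
		return ret;	/* no devm_kfree(): devm cleans up at unbind */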
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index cf549871c1ee..ef8548c3a423 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -45,6 +45,8 @@
45#include "ufs_quirks.h" 45#include "ufs_quirks.h"
46#include "unipro.h" 46#include "unipro.h"
47 47
48#define UFSHCD_REQ_SENSE_SIZE 18
49
48#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ 50#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
49 UTP_TASK_REQ_COMPL |\ 51 UTP_TASK_REQ_COMPL |\
50 UFSHCD_ERROR_MASK) 52 UFSHCD_ERROR_MASK)
@@ -57,15 +59,9 @@
57#define NOP_OUT_TIMEOUT 30 /* msecs */ 59#define NOP_OUT_TIMEOUT 30 /* msecs */
58 60
59/* Query request retries */ 61/* Query request retries */
60#define QUERY_REQ_RETRIES 10 62#define QUERY_REQ_RETRIES 3
61/* Query request timeout */ 63/* Query request timeout */
62#define QUERY_REQ_TIMEOUT 30 /* msec */ 64#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
63/*
64 * Query request timeout for fDeviceInit flag
65 * fDeviceInit query response time for some devices is so large that the default
66 * QUERY_REQ_TIMEOUT may not be enough for such devices.
67 */
68#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
69 65
70/* Task management command timeout */ 66/* Task management command timeout */
71#define TM_CMD_TIMEOUT 100 /* msecs */ 67#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -123,6 +119,7 @@ enum {
123 UFSHCD_STATE_RESET, 119 UFSHCD_STATE_RESET,
124 UFSHCD_STATE_ERROR, 120 UFSHCD_STATE_ERROR,
125 UFSHCD_STATE_OPERATIONAL, 121 UFSHCD_STATE_OPERATIONAL,
122 UFSHCD_STATE_EH_SCHEDULED,
126}; 123};
127 124
128/* UFSHCD error handling flags */ 125/* UFSHCD error handling flags */
@@ -598,6 +595,20 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
598 return false; 595 return false;
599} 596}
600 597
598static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
599{
600 if (ufshcd_is_clkscaling_enabled(hba)) {
601 devfreq_suspend_device(hba->devfreq);
602 hba->clk_scaling.window_start_t = 0;
603 }
604}
605
606static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
607{
608 if (ufshcd_is_clkscaling_enabled(hba))
609 devfreq_resume_device(hba->devfreq);
610}
611
601static void ufshcd_ungate_work(struct work_struct *work) 612static void ufshcd_ungate_work(struct work_struct *work)
602{ 613{
603 int ret; 614 int ret;
@@ -631,8 +642,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
631 hba->clk_gating.is_suspended = false; 642 hba->clk_gating.is_suspended = false;
632 } 643 }
633unblock_reqs: 644unblock_reqs:
634 if (ufshcd_is_clkscaling_enabled(hba)) 645 ufshcd_resume_clkscaling(hba);
635 devfreq_resume_device(hba->devfreq);
636 scsi_unblock_requests(hba->host); 646 scsi_unblock_requests(hba->host);
637} 647}
638 648
@@ -660,6 +670,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
660start: 670start:
661 switch (hba->clk_gating.state) { 671 switch (hba->clk_gating.state) {
662 case CLKS_ON: 672 case CLKS_ON:
673 /*
674 * Wait for the ungate work to complete if in progress.
675 * Though the clocks may be in ON state, the link could
676 * still be in hibern8 state if hibern8 is allowed
677 * during clock gating.
678 * Make sure we exit hibern8 state also in addition to
679 * clocks being ON.
680 */
681 if (ufshcd_can_hibern8_during_gating(hba) &&
682 ufshcd_is_link_hibern8(hba)) {
683 spin_unlock_irqrestore(hba->host->host_lock, flags);
684 flush_work(&hba->clk_gating.ungate_work);
685 spin_lock_irqsave(hba->host->host_lock, flags);
686 goto start;
687 }
663 break; 688 break;
664 case REQ_CLKS_OFF: 689 case REQ_CLKS_OFF:
665 if (cancel_delayed_work(&hba->clk_gating.gate_work)) { 690 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@ -709,7 +734,14 @@ static void ufshcd_gate_work(struct work_struct *work)
709 unsigned long flags; 734 unsigned long flags;
710 735
711 spin_lock_irqsave(hba->host->host_lock, flags); 736 spin_lock_irqsave(hba->host->host_lock, flags);
712 if (hba->clk_gating.is_suspended) { 737 /*
738 * In case you are here to cancel this work, the gating state
739 * would be marked as REQ_CLKS_ON. In that case, save time by
740 * skipping the gating work and exiting after changing the clock
741 * state to CLKS_ON.
742 */
743 if (hba->clk_gating.is_suspended ||
744 (hba->clk_gating.state == REQ_CLKS_ON)) {
713 hba->clk_gating.state = CLKS_ON; 745 hba->clk_gating.state = CLKS_ON;
714 goto rel_lock; 746 goto rel_lock;
715 } 747 }
@@ -731,10 +763,7 @@ static void ufshcd_gate_work(struct work_struct *work)
731 ufshcd_set_link_hibern8(hba); 763 ufshcd_set_link_hibern8(hba);
732 } 764 }
733 765
734 if (ufshcd_is_clkscaling_enabled(hba)) { 766 ufshcd_suspend_clkscaling(hba);
735 devfreq_suspend_device(hba->devfreq);
736 hba->clk_scaling.window_start_t = 0;
737 }
738 767
739 if (!ufshcd_is_link_active(hba)) 768 if (!ufshcd_is_link_active(hba))
740 ufshcd_setup_clocks(hba, false); 769 ufshcd_setup_clocks(hba, false);
@@ -878,6 +907,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
878 ufshcd_clk_scaling_start_busy(hba); 907 ufshcd_clk_scaling_start_busy(hba);
879 __set_bit(task_tag, &hba->outstanding_reqs); 908 __set_bit(task_tag, &hba->outstanding_reqs);
880 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); 909 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
910 /* Make sure that doorbell is committed immediately */
911 wmb();
881} 912}
882 913
883/** 914/**
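
A write barrier now follows the doorbell write, pairing with the wmb() that ufshcd_queuecommand() and ufshcd_exec_dev_cmd() place before issuing (both visible later in this diff): descriptor stores must be globally visible before the doorbell, and per the patch comment the trailing barrier is meant to push the doorbell out promptly rather than let it linger in write buffers (exact posted-write semantics remain platform dependent). Annotated:

	/* descriptors were made visible by an earlier wmb() */
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	wmb();	/* commit the doorbell immediately (intent per the patch) */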
@@ -889,10 +920,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
889 int len; 920 int len;
890 if (lrbp->sense_buffer && 921 if (lrbp->sense_buffer &&
891 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { 922 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
923 int len_to_copy;
924
892 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); 925 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
926 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
927
893 memcpy(lrbp->sense_buffer, 928 memcpy(lrbp->sense_buffer,
894 lrbp->ucd_rsp_ptr->sr.sense_data, 929 lrbp->ucd_rsp_ptr->sr.sense_data,
895 min_t(int, len, SCSI_SENSE_BUFFERSIZE)); 930 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
896 } 931 }
897} 932}
898 933
@@ -1088,7 +1123,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1088 * 1123 *
1089 * Returns 0 in case of success, non-zero value in case of failure 1124 * Returns 0 in case of success, non-zero value in case of failure
1090 */ 1125 */
1091static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) 1126static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1092{ 1127{
1093 struct ufshcd_sg_entry *prd_table; 1128 struct ufshcd_sg_entry *prd_table;
1094 struct scatterlist *sg; 1129 struct scatterlist *sg;
@@ -1102,8 +1137,13 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
1102 return sg_segments; 1137 return sg_segments;
1103 1138
1104 if (sg_segments) { 1139 if (sg_segments) {
1105 lrbp->utr_descriptor_ptr->prd_table_length = 1140 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
1106 cpu_to_le16((u16) (sg_segments)); 1141 lrbp->utr_descriptor_ptr->prd_table_length =
1142 cpu_to_le16((u16)(sg_segments *
1143 sizeof(struct ufshcd_sg_entry)));
1144 else
1145 lrbp->utr_descriptor_ptr->prd_table_length =
1146 cpu_to_le16((u16) (sg_segments));
1107 1147
1108 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; 1148 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
1109 1149
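
UFSHCD_QUIRK_PRDT_BYTE_GRAN captures controllers that expect PRD table lengths (and, in ufshcd_host_memory_configure() further down, response/PRDT offsets) in bytes rather than in entries or double words. The length rule in isolation:

	/* spec-compliant controllers count entries; quirky ones count bytes */
	if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
		lrbp->utr_descriptor_ptr->prd_table_length =
			cpu_to_le16(sg_segments * sizeof(struct ufshcd_sg_entry));
	else
		lrbp->utr_descriptor_ptr->prd_table_length =
			cpu_to_le16(sg_segments);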
@@ -1410,6 +1450,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1410 switch (hba->ufshcd_state) { 1450 switch (hba->ufshcd_state) {
1411 case UFSHCD_STATE_OPERATIONAL: 1451 case UFSHCD_STATE_OPERATIONAL:
1412 break; 1452 break;
1453 case UFSHCD_STATE_EH_SCHEDULED:
1413 case UFSHCD_STATE_RESET: 1454 case UFSHCD_STATE_RESET:
1414 err = SCSI_MLQUEUE_HOST_BUSY; 1455 err = SCSI_MLQUEUE_HOST_BUSY;
1415 goto out_unlock; 1456 goto out_unlock;
@@ -1457,7 +1498,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1457 1498
1458 WARN_ON(lrbp->cmd); 1499 WARN_ON(lrbp->cmd);
1459 lrbp->cmd = cmd; 1500 lrbp->cmd = cmd;
1460 lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; 1501 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
1461 lrbp->sense_buffer = cmd->sense_buffer; 1502 lrbp->sense_buffer = cmd->sense_buffer;
1462 lrbp->task_tag = tag; 1503 lrbp->task_tag = tag;
1463 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); 1504 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -1465,15 +1506,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1465 1506
1466 ufshcd_comp_scsi_upiu(hba, lrbp); 1507 ufshcd_comp_scsi_upiu(hba, lrbp);
1467 1508
1468 err = ufshcd_map_sg(lrbp); 1509 err = ufshcd_map_sg(hba, lrbp);
1469 if (err) { 1510 if (err) {
1470 lrbp->cmd = NULL; 1511 lrbp->cmd = NULL;
1471 clear_bit_unlock(tag, &hba->lrb_in_use); 1512 clear_bit_unlock(tag, &hba->lrb_in_use);
1472 goto out; 1513 goto out;
1473 } 1514 }
1515 /* Make sure descriptors are ready before ringing the doorbell */
1516 wmb();
1474 1517
1475 /* issue command to the controller */ 1518 /* issue command to the controller */
1476 spin_lock_irqsave(hba->host->host_lock, flags); 1519 spin_lock_irqsave(hba->host->host_lock, flags);
1520 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
1477 ufshcd_send_command(hba, tag); 1521 ufshcd_send_command(hba, tag);
1478out_unlock: 1522out_unlock:
1479 spin_unlock_irqrestore(hba->host->host_lock, flags); 1523 spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1581,6 +1625,8 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
1581 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, 1625 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
1582 msecs_to_jiffies(max_timeout)); 1626 msecs_to_jiffies(max_timeout));
1583 1627
1628 /* Make sure descriptors are ready before ringing the doorbell */
1629 wmb();
1584 spin_lock_irqsave(hba->host->host_lock, flags); 1630 spin_lock_irqsave(hba->host->host_lock, flags);
1585 hba->dev_cmd.complete = NULL; 1631 hba->dev_cmd.complete = NULL;
1586 if (likely(time_left)) { 1632 if (likely(time_left)) {
@@ -1683,6 +1729,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1683 /* Make sure descriptors are ready before ringing the doorbell */ 1729 /* Make sure descriptors are ready before ringing the doorbell */
1684 wmb(); 1730 wmb();
1685 spin_lock_irqsave(hba->host->host_lock, flags); 1731 spin_lock_irqsave(hba->host->host_lock, flags);
1732 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
1686 ufshcd_send_command(hba, tag); 1733 ufshcd_send_command(hba, tag);
1687 spin_unlock_irqrestore(hba->host->host_lock, flags); 1734 spin_unlock_irqrestore(hba->host->host_lock, flags);
1688 1735
@@ -1789,9 +1836,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1789 goto out_unlock; 1836 goto out_unlock;
1790 } 1837 }
1791 1838
1792 if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
1793 timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
1794
1795 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); 1839 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
1796 1840
1797 if (err) { 1841 if (err) {
@@ -1861,8 +1905,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1861 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); 1905 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1862 1906
1863 if (err) { 1907 if (err) {
1864 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", 1908 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
1865 __func__, opcode, idn, err); 1909 __func__, opcode, idn, index, err);
1866 goto out_unlock; 1910 goto out_unlock;
1867 } 1911 }
1868 1912
@@ -1961,8 +2005,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
1961 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); 2005 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1962 2006
1963 if (err) { 2007 if (err) {
1964 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", 2008 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
1965 __func__, opcode, idn, err); 2009 __func__, opcode, idn, index, err);
1966 goto out_unlock; 2010 goto out_unlock;
1967 } 2011 }
1968 2012
@@ -2055,18 +2099,41 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
2055 desc_id, desc_index, 0, desc_buf, 2099 desc_id, desc_index, 0, desc_buf,
2056 &buff_len); 2100 &buff_len);
2057 2101
2058 if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) || 2102 if (ret) {
2059 (desc_buf[QUERY_DESC_LENGTH_OFFSET] != 2103 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
2060 ufs_query_desc_max_size[desc_id]) 2104 __func__, desc_id, desc_index, param_offset, ret);
2061 || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
2062 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
2063 __func__, desc_id, param_offset, buff_len, ret);
2064 if (!ret)
2065 ret = -EINVAL;
2066 2105
2067 goto out; 2106 goto out;
2068 } 2107 }
2069 2108
2109 /* Sanity check */
2110 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
2111 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
2112 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
2113 ret = -EINVAL;
2114 goto out;
2115 }
2116
2117 /*
2118 * While reading variable size descriptors (like the string descriptor),
2119 * some UFS devices may report the "LENGTH" (a field in the "Transaction
2120 * Specific fields" of the Query Response UPIU) as the same value that
2121 * was requested in the Query Request UPIU instead of reporting the
2122 * actual size of the variable size descriptor.
2123 * It's safe to ignore the "LENGTH" field for variable size
2124 * descriptors, as we can always derive the length of the descriptor
2125 * from the descriptor header fields. Hence this change imposes the
2126 * length match check only for fixed size descriptors (for which we
2127 * always request the correct size as part of the Query Request UPIU).
2128 */
2129 if ((desc_id != QUERY_DESC_IDN_STRING) &&
2130 (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
2131 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
2132 __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
2133 ret = -EINVAL;
2134 goto out;
2135 }
2136
2070 if (is_kmalloc) 2137 if (is_kmalloc)
2071 memcpy(param_read_buf, &desc_buf[param_offset], param_size); 2138 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
2072out: 2139out:
@@ -2088,7 +2155,18 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
2088 u8 *buf, 2155 u8 *buf,
2089 u32 size) 2156 u32 size)
2090{ 2157{
2091 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); 2158 int err = 0;
2159 int retries;
2160
2161 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2162 /* Read descriptor*/
2163 err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
2164 if (!err)
2165 break;
2166 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2167 }
2168
2169 return err;
2092} 2170}
2093 2171
2094int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) 2172int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
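
ufshcd_read_power_desc is the first of several call sites in this series (unit descriptor reads, the ICC level write below) converted to the same bounded-retry idiom: try up to QUERY_REQ_RETRIES times, break on success, and return the last error otherwise. A generic standalone sketch of the pattern, with an assumed retry count and a stub query that fails transiently:

    #include <stdio.h>

    #define QUERY_REQ_RETRIES 3  /* assumed, matching the driver's bound */

    /* Stand-in for a query that may fail transiently. */
    static int do_query(int attempt)
    {
        return attempt < 2 ? -5 /* -EIO */ : 0;  /* succeed on 3rd try */
    }

    static int query_with_retries(void)
    {
        int err = 0;
        int retries;

        for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
            err = do_query(QUERY_REQ_RETRIES - retries);
            if (!err)
                break;
            fprintf(stderr, "error %d, retrying\n", err);
        }
        return err;  /* last error if all retries were exhausted */
    }

    int main(void)
    {
        printf("final result: %d\n", query_with_retries());
        return 0;
    }
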
@@ -2320,12 +2398,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2320 cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); 2398 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2321 2399
2322 /* Response upiu and prdt offset should be in double words */ 2400 /* Response upiu and prdt offset should be in double words */
2323 utrdlp[i].response_upiu_offset = 2401 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
2402 utrdlp[i].response_upiu_offset =
2403 cpu_to_le16(response_offset);
2404 utrdlp[i].prd_table_offset =
2405 cpu_to_le16(prdt_offset);
2406 utrdlp[i].response_upiu_length =
2407 cpu_to_le16(ALIGNED_UPIU_SIZE);
2408 } else {
2409 utrdlp[i].response_upiu_offset =
2324 cpu_to_le16((response_offset >> 2)); 2410 cpu_to_le16((response_offset >> 2));
2325 utrdlp[i].prd_table_offset = 2411 utrdlp[i].prd_table_offset =
2326 cpu_to_le16((prdt_offset >> 2)); 2412 cpu_to_le16((prdt_offset >> 2));
2327 utrdlp[i].response_upiu_length = 2413 utrdlp[i].response_upiu_length =
2328 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); 2414 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
2415 }
2329 2416
2330 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); 2417 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
2331 hba->lrb[i].ucd_req_ptr = 2418 hba->lrb[i].ucd_req_ptr =
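
Under UFSHCD_QUIRK_PRDT_BYTE_GRAN the response UPIU and PRD table offsets and the response length are programmed as raw byte counts instead of the spec's double-word (4-byte) units, hence the dropped ">> 2" shifts in the quirk branch. A standalone sketch of the two encodings; ALIGNED_UPIU_SIZE = 512 is assumed here for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGNED_UPIU_SIZE 512  /* assumed, for illustration */

    static uint16_t encode_offset(unsigned int byte_offset, int byte_gran)
    {
        /* Spec default: offsets expressed in double words (>> 2);
         * quirky controllers expect raw byte values. */
        return byte_gran ? (uint16_t)byte_offset
                         : (uint16_t)(byte_offset >> 2);
    }

    int main(void)
    {
        unsigned int response_offset = 128;

        printf("dword encoding: %u\n", encode_offset(response_offset, 0));
        printf("byte  encoding: %u\n", encode_offset(response_offset, 1));
        printf("UPIU len (dwords): %u\n", ALIGNED_UPIU_SIZE >> 2);
        return 0;
    }
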
@@ -2429,10 +2516,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2429 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); 2516 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2430 } while (ret && peer && --retries); 2517 } while (ret && peer && --retries);
2431 2518
2432 if (!retries) 2519 if (ret)
2433 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", 2520 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
2434 set, UIC_GET_ATTR_ID(attr_sel), mib_val, 2521 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
2435 retries); 2522 UFS_UIC_COMMAND_RETRIES - retries);
2436 2523
2437 return ret; 2524 return ret;
2438} 2525}
@@ -2496,9 +2583,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2496 get, UIC_GET_ATTR_ID(attr_sel), ret); 2583 get, UIC_GET_ATTR_ID(attr_sel), ret);
2497 } while (ret && peer && --retries); 2584 } while (ret && peer && --retries);
2498 2585
2499 if (!retries) 2586 if (ret)
2500 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", 2587 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
2501 get, UIC_GET_ATTR_ID(attr_sel), retries); 2588 get, UIC_GET_ATTR_ID(attr_sel),
2589 UFS_UIC_COMMAND_RETRIES - retries);
2502 2590
2503 if (mib_val && !ret) 2591 if (mib_val && !ret)
2504 *mib_val = uic_cmd.argument3; 2592 *mib_val = uic_cmd.argument3;
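
Both DME attribute helpers above switch the failure test from `if (!retries)` to `if (ret)`: the old test only fired when the retry budget was fully exhausted, so a failed local (non-peer) access, which never loops, was never logged; the attempts actually used are now reported as UFS_UIC_COMMAND_RETRIES - retries. A standalone sketch of the do/while accounting under an assumed retry count:

    #include <stdio.h>

    #define UIC_COMMAND_RETRIES 3  /* stands in for UFS_UIC_COMMAND_RETRIES */

    static void run(int fail_count, int peer)
    {
        int retries = UIC_COMMAND_RETRIES;
        int attempt = 0, ret;

        do {
            ret = (attempt++ < fail_count) ? -5 : 0;  /* -EIO, then ok */
        } while (ret && peer && --retries);

        /* Old test fired only when retries hit 0, so a failed *local*
         * (non-peer) access, which never loops, was never reported. */
        if (ret)
            printf("failed after %d retries\n",
                   UIC_COMMAND_RETRIES - retries);
        else
            printf("ok after %d attempt(s)\n", attempt);
    }

    int main(void)
    {
        run(0, 1);  /* peer access, first-try success */
        run(9, 1);  /* peer access, exhausts retries */
        run(9, 0);  /* local failure: old `if (!retries)` stayed silent */
        return 0;
    }
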
@@ -2651,6 +2739,8 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2651 int ret; 2739 int ret;
2652 struct uic_command uic_cmd = {0}; 2740 struct uic_command uic_cmd = {0};
2653 2741
2742 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
2743
2654 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; 2744 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
2655 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 2745 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2656 2746
@@ -2664,7 +2754,9 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2664 */ 2754 */
2665 if (ufshcd_link_recovery(hba)) 2755 if (ufshcd_link_recovery(hba))
2666 ret = -ENOLINK; 2756 ret = -ENOLINK;
2667 } 2757 } else
2758 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
2759 POST_CHANGE);
2668 2760
2669 return ret; 2761 return ret;
2670} 2762}
@@ -2687,13 +2779,17 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2687 struct uic_command uic_cmd = {0}; 2779 struct uic_command uic_cmd = {0};
2688 int ret; 2780 int ret;
2689 2781
2782 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
2783
2690 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; 2784 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2691 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 2785 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2692 if (ret) { 2786 if (ret) {
2693 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", 2787 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
2694 __func__, ret); 2788 __func__, ret);
2695 ret = ufshcd_link_recovery(hba); 2789 ret = ufshcd_link_recovery(hba);
2696 } 2790 } else
2791 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
2792 POST_CHANGE);
2697 2793
2698 return ret; 2794 return ret;
2699} 2795}
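
The hibern8 enter and exit paths now bracket the UIC command with vendor notifications: PRE_CHANGE before issuing it and POST_CHANGE only on success (on failure, link recovery runs instead). A standalone sketch modelling that call order with a hypothetical vendor hook; the enum names mirror the ones used by the patch:

    #include <stdio.h>

    enum change_status { PRE_CHANGE, POST_CHANGE };
    enum hiber_cmd { DME_HIBER_ENTER, DME_HIBER_EXIT };

    /* Hypothetical vendor hook: a real driver would touch PHY or
     * controller registers here; we just trace the calls. */
    static void vendor_hibern8_notify(enum hiber_cmd cmd, enum change_status st)
    {
        printf("hibern8 %s: %s\n",
               cmd == DME_HIBER_ENTER ? "enter" : "exit",
               st == PRE_CHANGE ? "pre" : "post");
    }

    static int hibern8_enter(int uic_ok)
    {
        vendor_hibern8_notify(DME_HIBER_ENTER, PRE_CHANGE);
        if (!uic_ok)
            return -1;  /* failure path: link recovery, no POST call */
        vendor_hibern8_notify(DME_HIBER_ENTER, POST_CHANGE);
        return 0;
    }

    int main(void)
    {
        hibern8_enter(1);
        hibern8_enter(0);
        return 0;
    }
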
@@ -2725,8 +2821,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
2725 if (hba->max_pwr_info.is_valid) 2821 if (hba->max_pwr_info.is_valid)
2726 return 0; 2822 return 0;
2727 2823
2728 pwr_info->pwr_tx = FASTAUTO_MODE; 2824 pwr_info->pwr_tx = FAST_MODE;
2729 pwr_info->pwr_rx = FASTAUTO_MODE; 2825 pwr_info->pwr_rx = FAST_MODE;
2730 pwr_info->hs_rate = PA_HS_MODE_B; 2826 pwr_info->hs_rate = PA_HS_MODE_B;
2731 2827
2732 /* Get the connected lane count */ 2828 /* Get the connected lane count */
@@ -2757,7 +2853,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
2757 __func__, pwr_info->gear_rx); 2853 __func__, pwr_info->gear_rx);
2758 return -EINVAL; 2854 return -EINVAL;
2759 } 2855 }
2760 pwr_info->pwr_rx = SLOWAUTO_MODE; 2856 pwr_info->pwr_rx = SLOW_MODE;
2761 } 2857 }
2762 2858
2763 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), 2859 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2770,7 +2866,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
2770 __func__, pwr_info->gear_tx); 2866 __func__, pwr_info->gear_tx);
2771 return -EINVAL; 2867 return -EINVAL;
2772 } 2868 }
2773 pwr_info->pwr_tx = SLOWAUTO_MODE; 2869 pwr_info->pwr_tx = SLOW_MODE;
2774 } 2870 }
2775 2871
2776 hba->max_pwr_info.is_valid = true; 2872 hba->max_pwr_info.is_valid = true;
@@ -3090,7 +3186,16 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
3090{ 3186{
3091 int ret; 3187 int ret;
3092 int retries = DME_LINKSTARTUP_RETRIES; 3188 int retries = DME_LINKSTARTUP_RETRIES;
3189 bool link_startup_again = false;
3190
3191 /*
 3192	 * If the UFS device isn't active then we will have to issue link
 3193	 * startup twice to make sure the device state moves to active.
3194 */
3195 if (!ufshcd_is_ufs_dev_active(hba))
3196 link_startup_again = true;
3093 3197
3198link_startup:
3094 do { 3199 do {
3095 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); 3200 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
3096 3201
@@ -3116,6 +3221,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
3116 /* failed to get the link up... retire */ 3221 /* failed to get the link up... retire */
3117 goto out; 3222 goto out;
3118 3223
3224 if (link_startup_again) {
3225 link_startup_again = false;
3226 retries = DME_LINKSTARTUP_RETRIES;
3227 goto link_startup;
3228 }
3229
3119 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { 3230 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
3120 ret = ufshcd_disable_device_tx_lcc(hba); 3231 ret = ufshcd_disable_device_tx_lcc(hba);
3121 if (ret) 3232 if (ret)
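
The link_startup_again flag makes the fully retried startup sequence run a second time, with a fresh retry budget, when the device was not already active and the first pass succeeded. A condensed standalone sketch of that control flow, with DME_LINKSTARTUP_RETRIES assumed to be 3 as in the driver:

    #include <stdio.h>

    #define DME_LINKSTARTUP_RETRIES 3  /* assumed driver value */

    static int dme_link_startup(int *calls)
    {
        ++*calls;
        return 0;  /* always succeed, to show the pass count */
    }

    static int link_startup(int dev_active)
    {
        int retries = DME_LINKSTARTUP_RETRIES;
        int again = !dev_active;  /* inactive device: start up twice */
        int ret, calls = 0;

    link_startup:
        do {
            ret = dme_link_startup(&calls);
        } while (ret && --retries);

        if (ret)
            return ret;  /* retries exhausted: give up */

        if (again) {  /* first pass succeeded: run the sequence again */
            again = 0;
            retries = DME_LINKSTARTUP_RETRIES;
            goto link_startup;
        }
        printf("startup passes: %d\n", calls);  /* 2 inactive, 1 active */
        return 0;
    }

    int main(void)
    {
        link_startup(0);
        link_startup(1);
        return 0;
    }
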
@@ -3181,16 +3292,24 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
3181{ 3292{
3182 int ret = 0; 3293 int ret = 0;
3183 u8 lun_qdepth; 3294 u8 lun_qdepth;
3295 int retries;
3184 struct ufs_hba *hba; 3296 struct ufs_hba *hba;
3185 3297
3186 hba = shost_priv(sdev->host); 3298 hba = shost_priv(sdev->host);
3187 3299
3188 lun_qdepth = hba->nutrs; 3300 lun_qdepth = hba->nutrs;
3189 ret = ufshcd_read_unit_desc_param(hba, 3301 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3190 ufshcd_scsi_to_upiu_lun(sdev->lun), 3302 /* Read descriptor*/
3191 UNIT_DESC_PARAM_LU_Q_DEPTH, 3303 ret = ufshcd_read_unit_desc_param(hba,
3192 &lun_qdepth, 3304 ufshcd_scsi_to_upiu_lun(sdev->lun),
3193 sizeof(lun_qdepth)); 3305 UNIT_DESC_PARAM_LU_Q_DEPTH,
3306 &lun_qdepth,
3307 sizeof(lun_qdepth));
 3308	 		if (!ret || ret == -EOPNOTSUPP)
3309 break;
3310
3311 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
3312 }
3194 3313
3195 /* Some WLUN doesn't support unit descriptor */ 3314 /* Some WLUN doesn't support unit descriptor */
3196 if (ret == -EOPNOTSUPP) 3315 if (ret == -EOPNOTSUPP)
@@ -4097,6 +4216,17 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
4097{ 4216{
4098 u32 reg; 4217 u32 reg;
4099 4218
4219 /* PHY layer lane error */
4220 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4221 /* Ignore LINERESET indication, as this is not an error */
4222 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
4223 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
4224 /*
 4225	 		 * To know whether this error is fatal or not, the DB
 4226	 		 * timeout must be checked; that error is handled separately.
4227 */
4228 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
4229
4100 /* PA_INIT_ERROR is fatal and needs UIC reset */ 4230 /* PA_INIT_ERROR is fatal and needs UIC reset */
4101 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); 4231 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
4102 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) 4232 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
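
The new UECPA read treats a latched PHY adapter error as informational unless one of the low lane-error bits is set; a LINERESET indication alone (outside UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK) is deliberately ignored. A standalone sketch of the bit test using the mask values added to ufshci.h:

    #include <stdio.h>
    #include <stdint.h>

    #define PHY_ADAPTER_LAYER_ERROR    (1u << 31)  /* an error is latched */
    #define PHY_ADAPTER_LANE_ERR_MASK  0xFu        /* per-lane error bits */

    static void check_uecpa(uint32_t reg)
    {
        if ((reg & PHY_ADAPTER_LAYER_ERROR) &&
            (reg & PHY_ADAPTER_LANE_ERR_MASK))
            printf("0x%08x: lane error reported\n", reg);
        else
            printf("0x%08x: ignored (no error, or LINERESET only)\n", reg);
    }

    int main(void)
    {
        check_uecpa(0x80000001);  /* latched, lane bit set -> reported */
        check_uecpa(0x80000010);  /* latched, lane bits clear -> ignored */
        check_uecpa(0x00000003);  /* nothing latched -> ignored */
        return 0;
    }
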
@@ -4158,7 +4288,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
4158 /* block commands from scsi mid-layer */ 4288 /* block commands from scsi mid-layer */
4159 scsi_block_requests(hba->host); 4289 scsi_block_requests(hba->host);
4160 4290
4161 hba->ufshcd_state = UFSHCD_STATE_ERROR; 4291 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
4162 schedule_work(&hba->eh_work); 4292 schedule_work(&hba->eh_work);
4163 } 4293 }
4164 } 4294 }
@@ -4311,6 +4441,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
4311 task_req_upiup->input_param1 = cpu_to_be32(lun_id); 4441 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
4312 task_req_upiup->input_param2 = cpu_to_be32(task_id); 4442 task_req_upiup->input_param2 = cpu_to_be32(task_id);
4313 4443
4444 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
4445
4314 /* send command to the controller */ 4446 /* send command to the controller */
4315 __set_bit(free_slot, &hba->outstanding_tasks); 4447 __set_bit(free_slot, &hba->outstanding_tasks);
4316 4448
@@ -4318,6 +4450,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
4318 wmb(); 4450 wmb();
4319 4451
4320 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); 4452 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
4453 /* Make sure that doorbell is committed immediately */
4454 wmb();
4321 4455
4322 spin_unlock_irqrestore(host->host_lock, flags); 4456 spin_unlock_irqrestore(host->host_lock, flags);
4323 4457
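
The added wmb() ensures the doorbell write is committed before the host lock is dropped and the caller begins waiting for completion, pairing with the earlier barrier that orders the task request descriptor updates ahead of the doorbell itself. A compiler-level sketch of the intended ordering; __sync_synchronize() is only a userspace stand-in for the kernel's wmb(), and real MMIO would go through the kernel accessors:

    #include <stdint.h>

    static volatile uint32_t task_desc_ready;  /* stands in for the UTMRD */
    static volatile uint32_t doorbell;         /* stands in for the MMIO reg */

    static void issue_tm(uint32_t slot)
    {
        task_desc_ready = 1;    /* publish the request descriptor */
        __sync_synchronize();   /* ~wmb(): descriptor before doorbell */

        doorbell = 1u << slot;  /* ring the doorbell */
        __sync_synchronize();   /* ~wmb(): doorbell committed before the
                                 * lock is released and we sleep on it */
    }

    int main(void)
    {
        issue_tm(0);
        return (int)doorbell == 1 ? 0 : 1;
    }
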
@@ -4722,6 +4856,24 @@ out:
4722 return icc_level; 4856 return icc_level;
4723} 4857}
4724 4858
4859static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
4860{
4861 int ret = 0;
4862 int retries;
4863
4864 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
4865 /* write attribute */
4866 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4867 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
4868 if (!ret)
4869 break;
4870
4871 dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
4872 }
4873
4874 return ret;
4875}
4876
4725static void ufshcd_init_icc_levels(struct ufs_hba *hba) 4877static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4726{ 4878{
4727 int ret; 4879 int ret;
@@ -4742,9 +4894,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4742 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", 4894 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
4743 __func__, hba->init_prefetch_data.icc_level); 4895 __func__, hba->init_prefetch_data.icc_level);
4744 4896
4745 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 4897 ret = ufshcd_set_icc_levels_attr(hba,
4746 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 4898 hba->init_prefetch_data.icc_level);
4747 &hba->init_prefetch_data.icc_level);
4748 4899
4749 if (ret) 4900 if (ret)
4750 dev_err(hba->dev, 4901 dev_err(hba->dev,
@@ -4965,6 +5116,76 @@ out:
4965 return ret; 5116 return ret;
4966} 5117}
4967 5118
5119/**
5120 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
5121 * less than device PA_TACTIVATE time.
5122 * @hba: per-adapter instance
5123 *
5124 * Some UFS devices require host PA_TACTIVATE to be lower than device
 5124	 * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to
 5125	 * be enabled for such devices.
5127 *
5128 * Returns zero on success, non-zero error value on failure.
5129 */
5130static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
5131{
5132 int ret = 0;
5133 u32 granularity, peer_granularity;
5134 u32 pa_tactivate, peer_pa_tactivate;
5135 u32 pa_tactivate_us, peer_pa_tactivate_us;
5136 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
5137
5138 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
5139 &granularity);
5140 if (ret)
5141 goto out;
5142
5143 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
5144 &peer_granularity);
5145 if (ret)
5146 goto out;
5147
5148 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
5149 (granularity > PA_GRANULARITY_MAX_VAL)) {
5150 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
5151 __func__, granularity);
5152 return -EINVAL;
5153 }
5154
5155 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
5156 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
5157 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
5158 __func__, peer_granularity);
5159 return -EINVAL;
5160 }
5161
5162 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
5163 if (ret)
5164 goto out;
5165
5166 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
5167 &peer_pa_tactivate);
5168 if (ret)
5169 goto out;
5170
5171 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
5172 peer_pa_tactivate_us = peer_pa_tactivate *
5173 gran_to_us_table[peer_granularity - 1];
5174
5175 if (pa_tactivate_us > peer_pa_tactivate_us) {
5176 u32 new_peer_pa_tactivate;
5177
5178 new_peer_pa_tactivate = pa_tactivate_us /
5179 gran_to_us_table[peer_granularity - 1];
5180 new_peer_pa_tactivate++;
5181 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
5182 new_peer_pa_tactivate);
5183 }
5184
5185out:
5186 return ret;
5187}
5188
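
The tuning logic converts both PA_TACTIVATE values to microseconds through the granularity step table (1, 4, 8, 16, 32, 100 us for granularity 1..6) and, when the host's time exceeds the device's, raises the device value expressed in its own granularity. A standalone sketch with worked numbers:

    #include <stdio.h>

    static const unsigned int gran_to_us[] = {1, 4, 8, 16, 32, 100};

    /* Returns the new peer PA_TACTIVATE, or the old one if no change is
     * needed. Granularities are 1-based table indexes, as in the patch. */
    static unsigned int tune_peer_tactivate(unsigned int host_gran,
                                            unsigned int host_tact,
                                            unsigned int peer_gran,
                                            unsigned int peer_tact)
    {
        unsigned int host_us = host_tact * gran_to_us[host_gran - 1];
        unsigned int peer_us = peer_tact * gran_to_us[peer_gran - 1];

        if (host_us > peer_us)  /* device must wait at least as long */
            return host_us / gran_to_us[peer_gran - 1] + 1;
        return peer_tact;
    }

    int main(void)
    {
        /* host: gran 4 (16us) * 3 = 48us; peer: gran 3 (8us) * 2 = 16us.
         * 48 > 16, so new peer value = 48 / 8 + 1 = 7, i.e. 56us. */
        printf("new peer PA_TACTIVATE: %u\n",
               tune_peer_tactivate(4, 3, 3, 2));
        return 0;
    }
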
4968static void ufshcd_tune_unipro_params(struct ufs_hba *hba) 5189static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
4969{ 5190{
4970 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { 5191 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
@@ -4975,6 +5196,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
4975 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) 5196 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
4976 /* set 1ms timeout for PA_TACTIVATE */ 5197 /* set 1ms timeout for PA_TACTIVATE */
4977 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); 5198 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
5199
5200 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
5201 ufshcd_quirk_tune_host_pa_tactivate(hba);
4978} 5202}
4979 5203
4980/** 5204/**
@@ -5027,9 +5251,11 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
5027 __func__); 5251 __func__);
5028 } else { 5252 } else {
5029 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); 5253 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
5030 if (ret) 5254 if (ret) {
5031 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", 5255 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
5032 __func__, ret); 5256 __func__, ret);
5257 goto out;
5258 }
5033 } 5259 }
5034 5260
5035 /* set the state as operational after switching to desired gear */ 5261 /* set the state as operational after switching to desired gear */
@@ -5062,8 +5288,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
5062 hba->is_init_prefetch = true; 5288 hba->is_init_prefetch = true;
5063 5289
5064 /* Resume devfreq after UFS device is detected */ 5290 /* Resume devfreq after UFS device is detected */
5065 if (ufshcd_is_clkscaling_enabled(hba)) 5291 ufshcd_resume_clkscaling(hba);
5066 devfreq_resume_device(hba->devfreq);
5067 5292
5068out: 5293out:
5069 /* 5294 /*
@@ -5389,6 +5614,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
5389 if (!head || list_empty(head)) 5614 if (!head || list_empty(head))
5390 goto out; 5615 goto out;
5391 5616
5617 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
5618 if (ret)
5619 return ret;
5620
5392 list_for_each_entry(clki, head, list) { 5621 list_for_each_entry(clki, head, list) {
5393 if (!IS_ERR_OR_NULL(clki->clk)) { 5622 if (!IS_ERR_OR_NULL(clki->clk)) {
5394 if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) 5623 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
@@ -5410,7 +5639,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
5410 } 5639 }
5411 } 5640 }
5412 5641
5413 ret = ufshcd_vops_setup_clocks(hba, on); 5642 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
5643 if (ret)
5644 return ret;
5645
5414out: 5646out:
5415 if (ret) { 5647 if (ret) {
5416 list_for_each_entry(clki, head, list) { 5648 list_for_each_entry(clki, head, list) {
@@ -5500,8 +5732,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
5500 if (!hba->vops) 5732 if (!hba->vops)
5501 return; 5733 return;
5502 5734
5503 ufshcd_vops_setup_clocks(hba, false);
5504
5505 ufshcd_vops_setup_regulators(hba, false); 5735 ufshcd_vops_setup_regulators(hba, false);
5506 5736
5507 ufshcd_vops_exit(hba); 5737 ufshcd_vops_exit(hba);
@@ -5564,6 +5794,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
5564 if (hba->is_powered) { 5794 if (hba->is_powered) {
5565 ufshcd_variant_hba_exit(hba); 5795 ufshcd_variant_hba_exit(hba);
5566 ufshcd_setup_vreg(hba, false); 5796 ufshcd_setup_vreg(hba, false);
5797 ufshcd_suspend_clkscaling(hba);
5567 ufshcd_setup_clocks(hba, false); 5798 ufshcd_setup_clocks(hba, false);
5568 ufshcd_setup_hba_vreg(hba, false); 5799 ufshcd_setup_hba_vreg(hba, false);
5569 hba->is_powered = false; 5800 hba->is_powered = false;
@@ -5577,19 +5808,19 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
5577 0, 5808 0,
5578 0, 5809 0,
5579 0, 5810 0,
5580 SCSI_SENSE_BUFFERSIZE, 5811 UFSHCD_REQ_SENSE_SIZE,
5581 0}; 5812 0};
5582 char *buffer; 5813 char *buffer;
5583 int ret; 5814 int ret;
5584 5815
5585 buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); 5816 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
5586 if (!buffer) { 5817 if (!buffer) {
5587 ret = -ENOMEM; 5818 ret = -ENOMEM;
5588 goto out; 5819 goto out;
5589 } 5820 }
5590 5821
5591 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, 5822 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
5592 SCSI_SENSE_BUFFERSIZE, NULL, 5823 UFSHCD_REQ_SENSE_SIZE, NULL,
5593 msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM); 5824 msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
5594 if (ret) 5825 if (ret)
5595 pr_err("%s: failed with err %d\n", __func__, ret); 5826 pr_err("%s: failed with err %d\n", __func__, ret);
@@ -5766,7 +5997,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
5766 !hba->dev_info.is_lu_power_on_wp) { 5997 !hba->dev_info.is_lu_power_on_wp) {
5767 ret = ufshcd_setup_vreg(hba, true); 5998 ret = ufshcd_setup_vreg(hba, true);
5768 } else if (!ufshcd_is_ufs_dev_active(hba)) { 5999 } else if (!ufshcd_is_ufs_dev_active(hba)) {
5769 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
5770 if (!ret && !ufshcd_is_link_active(hba)) { 6000 if (!ret && !ufshcd_is_link_active(hba)) {
5771 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); 6001 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5772 if (ret) 6002 if (ret)
@@ -5775,6 +6005,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
5775 if (ret) 6005 if (ret)
5776 goto vccq_lpm; 6006 goto vccq_lpm;
5777 } 6007 }
6008 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
5778 } 6009 }
5779 goto out; 6010 goto out;
5780 6011
@@ -5839,6 +6070,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
5839 ufshcd_hold(hba, false); 6070 ufshcd_hold(hba, false);
5840 hba->clk_gating.is_suspended = true; 6071 hba->clk_gating.is_suspended = true;
5841 6072
6073 ufshcd_suspend_clkscaling(hba);
6074
5842 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && 6075 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
5843 req_link_state == UIC_LINK_ACTIVE_STATE) { 6076 req_link_state == UIC_LINK_ACTIVE_STATE) {
5844 goto disable_clks; 6077 goto disable_clks;
@@ -5846,12 +6079,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
5846 6079
5847 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && 6080 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
5848 (req_link_state == hba->uic_link_state)) 6081 (req_link_state == hba->uic_link_state))
5849 goto out; 6082 goto enable_gating;
5850 6083
5851 /* UFS device & link must be active before we enter in this function */ 6084 /* UFS device & link must be active before we enter in this function */
5852 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { 6085 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
5853 ret = -EINVAL; 6086 ret = -EINVAL;
5854 goto out; 6087 goto enable_gating;
5855 } 6088 }
5856 6089
5857 if (ufshcd_is_runtime_pm(pm_op)) { 6090 if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5888,15 +6121,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
5888 6121
5889disable_clks: 6122disable_clks:
5890 /* 6123 /*
5891 * The clock scaling needs access to controller registers. Hence, Wait
5892 * for pending clock scaling work to be done before clocks are
5893 * turned off.
5894 */
5895 if (ufshcd_is_clkscaling_enabled(hba)) {
5896 devfreq_suspend_device(hba->devfreq);
5897 hba->clk_scaling.window_start_t = 0;
5898 }
5899 /*
5900 * Call vendor specific suspend callback. As these callbacks may access 6124 * Call vendor specific suspend callback. As these callbacks may access
5901 * vendor specific host controller register space call them before the 6125 * vendor specific host controller register space call them before the
5902 * host clocks are ON. 6126 * host clocks are ON.
@@ -5905,10 +6129,6 @@ disable_clks:
5905 if (ret) 6129 if (ret)
5906 goto set_link_active; 6130 goto set_link_active;
5907 6131
5908 ret = ufshcd_vops_setup_clocks(hba, false);
5909 if (ret)
5910 goto vops_resume;
5911
5912 if (!ufshcd_is_link_active(hba)) 6132 if (!ufshcd_is_link_active(hba))
5913 ufshcd_setup_clocks(hba, false); 6133 ufshcd_setup_clocks(hba, false);
5914 else 6134 else
@@ -5925,9 +6145,8 @@ disable_clks:
5925 ufshcd_hba_vreg_set_lpm(hba); 6145 ufshcd_hba_vreg_set_lpm(hba);
5926 goto out; 6146 goto out;
5927 6147
5928vops_resume:
5929 ufshcd_vops_resume(hba, pm_op);
5930set_link_active: 6148set_link_active:
6149 ufshcd_resume_clkscaling(hba);
5931 ufshcd_vreg_set_hpm(hba); 6150 ufshcd_vreg_set_hpm(hba);
5932 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) 6151 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
5933 ufshcd_set_link_active(hba); 6152 ufshcd_set_link_active(hba);
@@ -5937,6 +6156,7 @@ set_dev_active:
5937 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) 6156 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
5938 ufshcd_disable_auto_bkops(hba); 6157 ufshcd_disable_auto_bkops(hba);
5939enable_gating: 6158enable_gating:
6159 ufshcd_resume_clkscaling(hba);
5940 hba->clk_gating.is_suspended = false; 6160 hba->clk_gating.is_suspended = false;
5941 ufshcd_release(hba); 6161 ufshcd_release(hba);
5942out: 6162out:
@@ -6015,8 +6235,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
6015 ufshcd_urgent_bkops(hba); 6235 ufshcd_urgent_bkops(hba);
6016 hba->clk_gating.is_suspended = false; 6236 hba->clk_gating.is_suspended = false;
6017 6237
6018 if (ufshcd_is_clkscaling_enabled(hba)) 6238 ufshcd_resume_clkscaling(hba);
6019 devfreq_resume_device(hba->devfreq);
6020 6239
6021 /* Schedule clock gating in case of no access to UFS device yet */ 6240 /* Schedule clock gating in case of no access to UFS device yet */
6022 ufshcd_release(hba); 6241 ufshcd_release(hba);
@@ -6030,6 +6249,7 @@ disable_vreg:
6030 ufshcd_vreg_set_lpm(hba); 6249 ufshcd_vreg_set_lpm(hba);
6031disable_irq_and_vops_clks: 6250disable_irq_and_vops_clks:
6032 ufshcd_disable_irq(hba); 6251 ufshcd_disable_irq(hba);
6252 ufshcd_suspend_clkscaling(hba);
6033 ufshcd_setup_clocks(hba, false); 6253 ufshcd_setup_clocks(hba, false);
6034out: 6254out:
6035 hba->pm_op_in_progress = 0; 6255 hba->pm_op_in_progress = 0;
@@ -6052,16 +6272,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
6052 if (!hba || !hba->is_powered) 6272 if (!hba || !hba->is_powered)
6053 return 0; 6273 return 0;
6054 6274
6055 if (pm_runtime_suspended(hba->dev)) { 6275 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
6056 if (hba->rpm_lvl == hba->spm_lvl) 6276 hba->curr_dev_pwr_mode) &&
6057 /* 6277 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
6058 * There is possibility that device may still be in 6278 hba->uic_link_state))
6059 * active state during the runtime suspend. 6279 goto out;
6060 */
6061 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
6062 hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
6063 goto out;
6064 6280
6281 if (pm_runtime_suspended(hba->dev)) {
6065 /* 6282 /*
6066 * UFS device and/or UFS link low power states during runtime 6283 * UFS device and/or UFS link low power states during runtime
6067 * suspend seems to be different than what is expected during 6284 * suspend seems to be different than what is expected during
@@ -6092,7 +6309,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
6092 6309
6093int ufshcd_system_resume(struct ufs_hba *hba) 6310int ufshcd_system_resume(struct ufs_hba *hba)
6094{ 6311{
6095 if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev)) 6312 if (!hba)
6313 return -EINVAL;
6314
6315 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
6096 /* 6316 /*
6097 * Let the runtime resume take care of resuming 6317 * Let the runtime resume take care of resuming
6098 * if runtime suspended. 6318 * if runtime suspended.
@@ -6113,7 +6333,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
6113 */ 6333 */
6114int ufshcd_runtime_suspend(struct ufs_hba *hba) 6334int ufshcd_runtime_suspend(struct ufs_hba *hba)
6115{ 6335{
6116 if (!hba || !hba->is_powered) 6336 if (!hba)
6337 return -EINVAL;
6338
6339 if (!hba->is_powered)
6117 return 0; 6340 return 0;
6118 6341
6119 return ufshcd_suspend(hba, UFS_RUNTIME_PM); 6342 return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@@ -6143,10 +6366,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
6143 */ 6366 */
6144int ufshcd_runtime_resume(struct ufs_hba *hba) 6367int ufshcd_runtime_resume(struct ufs_hba *hba)
6145{ 6368{
6146 if (!hba || !hba->is_powered) 6369 if (!hba)
6370 return -EINVAL;
6371
6372 if (!hba->is_powered)
6147 return 0; 6373 return 0;
6148 else 6374
6149 return ufshcd_resume(hba, UFS_RUNTIME_PM); 6375 return ufshcd_resume(hba, UFS_RUNTIME_PM);
6150} 6376}
6151EXPORT_SYMBOL(ufshcd_runtime_resume); 6377EXPORT_SYMBOL(ufshcd_runtime_resume);
6152 6378
@@ -6198,11 +6424,7 @@ void ufshcd_remove(struct ufs_hba *hba)
6198 ufshcd_disable_intr(hba, hba->intr_mask); 6424 ufshcd_disable_intr(hba, hba->intr_mask);
6199 ufshcd_hba_stop(hba, true); 6425 ufshcd_hba_stop(hba, true);
6200 6426
6201 scsi_host_put(hba->host);
6202
6203 ufshcd_exit_clk_gating(hba); 6427 ufshcd_exit_clk_gating(hba);
6204 if (ufshcd_is_clkscaling_enabled(hba))
6205 devfreq_remove_device(hba->devfreq);
6206 ufshcd_hba_exit(hba); 6428 ufshcd_hba_exit(hba);
6207} 6429}
6208EXPORT_SYMBOL_GPL(ufshcd_remove); 6430EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6324,15 +6546,47 @@ static int ufshcd_devfreq_target(struct device *dev,
6324{ 6546{
6325 int err = 0; 6547 int err = 0;
6326 struct ufs_hba *hba = dev_get_drvdata(dev); 6548 struct ufs_hba *hba = dev_get_drvdata(dev);
6549 bool release_clk_hold = false;
6550 unsigned long irq_flags;
6327 6551
6328 if (!ufshcd_is_clkscaling_enabled(hba)) 6552 if (!ufshcd_is_clkscaling_enabled(hba))
6329 return -EINVAL; 6553 return -EINVAL;
6330 6554
6555 spin_lock_irqsave(hba->host->host_lock, irq_flags);
6556 if (ufshcd_eh_in_progress(hba)) {
6557 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6558 return 0;
6559 }
6560
6561 if (ufshcd_is_clkgating_allowed(hba) &&
6562 (hba->clk_gating.state != CLKS_ON)) {
6563 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
6564 /* hold the vote until the scaling work is completed */
6565 hba->clk_gating.active_reqs++;
6566 release_clk_hold = true;
6567 hba->clk_gating.state = CLKS_ON;
6568 } else {
6569 /*
 6570	 			 * Clock gating work seems to be running in parallel,
 6571	 			 * hence skip scaling work to avoid a deadlock between
 6572	 			 * the current scaling work and the gating work.
6573 */
6574 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6575 return 0;
6576 }
6577 }
6578 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6579
6331 if (*freq == UINT_MAX) 6580 if (*freq == UINT_MAX)
6332 err = ufshcd_scale_clks(hba, true); 6581 err = ufshcd_scale_clks(hba, true);
6333 else if (*freq == 0) 6582 else if (*freq == 0)
6334 err = ufshcd_scale_clks(hba, false); 6583 err = ufshcd_scale_clks(hba, false);
6335 6584
6585 spin_lock_irqsave(hba->host->host_lock, irq_flags);
6586 if (release_clk_hold)
6587 __ufshcd_release(hba);
6588 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6589
6336 return err; 6590 return err;
6337} 6591}
6338 6592
@@ -6498,7 +6752,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
6498 } 6752 }
6499 6753
6500 if (ufshcd_is_clkscaling_enabled(hba)) { 6754 if (ufshcd_is_clkscaling_enabled(hba)) {
6501 hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile, 6755 hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
6502 "simple_ondemand", NULL); 6756 "simple_ondemand", NULL);
6503 if (IS_ERR(hba->devfreq)) { 6757 if (IS_ERR(hba->devfreq)) {
6504 dev_err(hba->dev, "Unable to register with devfreq %ld\n", 6758 dev_err(hba->dev, "Unable to register with devfreq %ld\n",
@@ -6507,18 +6761,19 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
6507 goto out_remove_scsi_host; 6761 goto out_remove_scsi_host;
6508 } 6762 }
6509 /* Suspend devfreq until the UFS device is detected */ 6763 /* Suspend devfreq until the UFS device is detected */
6510 devfreq_suspend_device(hba->devfreq); 6764 ufshcd_suspend_clkscaling(hba);
6511 hba->clk_scaling.window_start_t = 0;
6512 } 6765 }
6513 6766
6514 /* Hold auto suspend until async scan completes */ 6767 /* Hold auto suspend until async scan completes */
6515 pm_runtime_get_sync(dev); 6768 pm_runtime_get_sync(dev);
6516 6769
6517 /* 6770 /*
 6518	 * The device-initialize-sequence hasn't been invoked yet.	 6771	 * We are assuming that the device wasn't put in sleep/power-down
 6519	 * Set the device to power-off state	 6772	 * state during the boot stage, before the kernel started.
6773 * This assumption helps avoid doing link startup twice during
6774 * ufshcd_probe_hba().
6520 */ 6775 */
6521 ufshcd_set_ufs_dev_poweroff(hba); 6776 ufshcd_set_ufs_dev_active(hba);
6522 6777
6523 async_schedule(ufshcd_async_scan, hba); 6778 async_schedule(ufshcd_async_scan, hba);
6524 6779
@@ -6530,7 +6785,6 @@ exit_gating:
6530 ufshcd_exit_clk_gating(hba); 6785 ufshcd_exit_clk_gating(hba);
6531out_disable: 6786out_disable:
6532 hba->is_irq_enabled = false; 6787 hba->is_irq_enabled = false;
6533 scsi_host_put(host);
6534 ufshcd_hba_exit(hba); 6788 ufshcd_hba_exit(hba);
6535out_error: 6789out_error:
6536 return err; 6790 return err;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 430bef111293..7d9ff22acfea 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -261,6 +261,12 @@ struct ufs_pwr_mode_info {
261 * @pwr_change_notify: called before and after a power mode change 261 * @pwr_change_notify: called before and after a power mode change
 262	 * is carried out to allow vendor specific capabilities	 262	 * is carried out to allow vendor specific capabilities
263 * to be set. 263 * to be set.
 264	 * @setup_xfer_req: called before any transfer request is issued
 265	 *                  to apply any vendor specific setup
 266	 * @setup_task_mgmt: called before any task management request is issued
 267	 *                  to apply any vendor specific setup
 268	 * @hibern8_notify: called around hibern8 enter/exit
 269	 *                  to apply any vendor specific configuration
264 * @suspend: called during host controller PM callback 270 * @suspend: called during host controller PM callback
265 * @resume: called during host controller PM callback 271 * @resume: called during host controller PM callback
266 * @dbg_register_dump: used to dump controller debug information 272 * @dbg_register_dump: used to dump controller debug information
@@ -273,7 +279,8 @@ struct ufs_hba_variant_ops {
273 u32 (*get_ufs_hci_version)(struct ufs_hba *); 279 u32 (*get_ufs_hci_version)(struct ufs_hba *);
274 int (*clk_scale_notify)(struct ufs_hba *, bool, 280 int (*clk_scale_notify)(struct ufs_hba *, bool,
275 enum ufs_notify_change_status); 281 enum ufs_notify_change_status);
276 int (*setup_clocks)(struct ufs_hba *, bool); 282 int (*setup_clocks)(struct ufs_hba *, bool,
283 enum ufs_notify_change_status);
277 int (*setup_regulators)(struct ufs_hba *, bool); 284 int (*setup_regulators)(struct ufs_hba *, bool);
278 int (*hce_enable_notify)(struct ufs_hba *, 285 int (*hce_enable_notify)(struct ufs_hba *,
279 enum ufs_notify_change_status); 286 enum ufs_notify_change_status);
@@ -283,6 +290,10 @@ struct ufs_hba_variant_ops {
283 enum ufs_notify_change_status status, 290 enum ufs_notify_change_status status,
284 struct ufs_pa_layer_attr *, 291 struct ufs_pa_layer_attr *,
285 struct ufs_pa_layer_attr *); 292 struct ufs_pa_layer_attr *);
293 void (*setup_xfer_req)(struct ufs_hba *, int, bool);
294 void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
295 void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
296 enum ufs_notify_change_status);
286 int (*suspend)(struct ufs_hba *, enum ufs_pm_op); 297 int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
287 int (*resume)(struct ufs_hba *, enum ufs_pm_op); 298 int (*resume)(struct ufs_hba *, enum ufs_pm_op);
288 void (*dbg_register_dump)(struct ufs_hba *hba); 299 void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -474,6 +485,12 @@ struct ufs_hba {
474 */ 485 */
475 #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5) 486 #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
476 487
488 /*
 489	 * This quirk needs to be enabled if the host controller interprets
 490	 * the values of PRDTO and PRDTL in the UTRD with byte granularity.
491 */
492 #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7)
493
477 unsigned int quirks; /* Deviations from standard UFSHCI spec. */ 494 unsigned int quirks; /* Deviations from standard UFSHCI spec. */
478 495
479 /* Device deviations from standard UFS device spec. */ 496 /* Device deviations from standard UFS device spec. */
@@ -755,10 +772,11 @@ static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
755 return 0; 772 return 0;
756} 773}
757 774
758static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on) 775static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
776 enum ufs_notify_change_status status)
759{ 777{
760 if (hba->vops && hba->vops->setup_clocks) 778 if (hba->vops && hba->vops->setup_clocks)
761 return hba->vops->setup_clocks(hba, on); 779 return hba->vops->setup_clocks(hba, on, status);
762 return 0; 780 return 0;
763} 781}
764 782
@@ -799,6 +817,28 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
799 return -ENOTSUPP; 817 return -ENOTSUPP;
800} 818}
801 819
820static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
821 bool is_scsi_cmd)
822{
823 if (hba->vops && hba->vops->setup_xfer_req)
824 return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
825}
826
827static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
828 int tag, u8 tm_function)
829{
830 if (hba->vops && hba->vops->setup_task_mgmt)
831 return hba->vops->setup_task_mgmt(hba, tag, tm_function);
832}
833
834static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
835 enum uic_cmd_dme cmd,
836 enum ufs_notify_change_status status)
837{
838 if (hba->vops && hba->vops->hibern8_notify)
839 return hba->vops->hibern8_notify(hba, cmd, status);
840}
841
802static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op) 842static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
803{ 843{
804 if (hba->vops && hba->vops->suspend) 844 if (hba->vops && hba->vops->suspend)
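
A vendor driver opts into the new hooks by filling the extra ufs_hba_variant_ops members; every wrapper above is a no-op when the corresponding pointer is NULL, so existing variants are unaffected. A standalone sketch of that NULL-safe dispatch with a hypothetical vendor implementation (names invented for illustration):

    #include <stdio.h>
    #include <stddef.h>

    /* Trimmed-down model of the extended variant ops. */
    struct variant_ops {
        void (*setup_xfer_req)(int tag, int is_scsi_cmd);
        void (*setup_task_mgmt)(int tag, unsigned char tm_function);
    };

    static void acme_setup_xfer_req(int tag, int is_scsi_cmd)
    {
        printf("xfer req: tag %d scsi=%d\n", tag, is_scsi_cmd);
    }

    /* NULL-safe dispatch, like the ufshcd_vops_* wrappers. */
    static void vops_setup_xfer_req(const struct variant_ops *ops,
                                    int tag, int is_scsi_cmd)
    {
        if (ops && ops->setup_xfer_req)
            ops->setup_xfer_req(tag, is_scsi_cmd);
    }

    int main(void)
    {
        /* setup_task_mgmt left NULL: the wrapper silently skips it. */
        const struct variant_ops acme_ops = {
            .setup_xfer_req = acme_setup_xfer_req,
        };

        vops_setup_xfer_req(&acme_ops, 7, 1);
        return 0;
    }
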
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 9599741ff606..5d978867be57 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -83,6 +83,8 @@ enum {
83 MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000, 83 MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
84}; 84};
85 85
86#define UFS_MASK(mask, offset) ((mask) << (offset))
87
86/* UFS Version 08h */ 88/* UFS Version 08h */
87#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0) 89#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
88#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16) 90#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
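
UFS_MASK(mask, offset) simply pre-shifts a field mask to its bit position, so register fields come out with a mask-and-shift. A quick standalone check using the two version masks defined above (the register value is hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    #define UFS_MASK(mask, offset)  ((mask) << (offset))

    #define MINOR_VERSION_NUM_MASK  UFS_MASK(0xFFFF, 0)
    #define MAJOR_VERSION_NUM_MASK  UFS_MASK(0xFFFF, 16)

    int main(void)
    {
        uint32_t ver = 0x00020001;  /* hypothetical: major=2, minor=1 */

        printf("major %u, minor %u\n",
               (ver & MAJOR_VERSION_NUM_MASK) >> 16,
               ver & MINOR_VERSION_NUM_MASK);
        return 0;
    }
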
@@ -166,6 +168,7 @@ enum {
166/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */ 168/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
167#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31) 169#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
168#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F 170#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
171#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
169 172
170/* UECDL - Host UIC Error Code Data Link Layer 3Ch */ 173/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
171#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31) 174#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index eff8b5675575..23129d7b2678 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -123,6 +123,7 @@
123#define PA_MAXRXHSGEAR 0x1587 123#define PA_MAXRXHSGEAR 0x1587
124#define PA_RXHSUNTERMCAP 0x15A5 124#define PA_RXHSUNTERMCAP 0x15A5
125#define PA_RXLSTERMCAP 0x15A6 125#define PA_RXLSTERMCAP 0x15A6
126#define PA_GRANULARITY 0x15AA
126#define PA_PACPREQTIMEOUT 0x1590 127#define PA_PACPREQTIMEOUT 0x1590
127#define PA_PACPREQEOBTIMEOUT 0x1591 128#define PA_PACPREQEOBTIMEOUT 0x1591
128#define PA_HIBERN8TIME 0x15A7 129#define PA_HIBERN8TIME 0x15A7
@@ -158,6 +159,9 @@
158#define VS_DEBUGOMC 0xD09E 159#define VS_DEBUGOMC 0xD09E
159#define VS_POWERSTATE 0xD083 160#define VS_POWERSTATE 0xD083
160 161
162#define PA_GRANULARITY_MIN_VAL 1
163#define PA_GRANULARITY_MAX_VAL 6
164
161/* PHY Adapter Protocol Constants */ 165/* PHY Adapter Protocol Constants */
162#define PA_MAXDATALANES 4 166#define PA_MAXDATALANES 4
163 167