author    Linus Torvalds <torvalds@linux-foundation.org>    2008-07-15 21:58:04 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-07-15 21:58:04 -0400
commit    89a93f2f4834f8c126e8d9dd6b368d0b9e21ec3d (patch)
tree      e731456fec0cab1225ad3e806dc8d3efefa0a78b /drivers/scsi
parent    260eddf4391f162a69d1d163729249635fa7a78f (diff)
parent    fe9233fb6914a0eb20166c967e3020f7f0fba2c9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (102 commits)
  [SCSI] scsi_dh: fix kconfig related build errors
  [SCSI] sym53c8xx: Fix bogus sym_que_entry re-implementation of container_of
  [SCSI] scsi_cmnd.h: remove double inclusion of linux/blkdev.h
  [SCSI] make struct scsi_{host,target}_type static
  [SCSI] fix locking in host use of blk_plug_device()
  [SCSI] zfcp: Cleanup external header file
  [SCSI] zfcp: Cleanup code in zfcp_erp.c
  [SCSI] zfcp: zfcp_fsf cleanup.
  [SCSI] zfcp: consolidate sysfs things into one file.
  [SCSI] zfcp: Cleanup of code in zfcp_aux.c
  [SCSI] zfcp: Cleanup of code in zfcp_scsi.c
  [SCSI] zfcp: Move status accessors from zfcp to SCSI include file.
  [SCSI] zfcp: Small QDIO cleanups
  [SCSI] zfcp: Adapter reopen for large number of unsolicited status
  [SCSI] zfcp: Fix error checking for ELS ADISC requests
  [SCSI] zfcp: wait until adapter is finished with ERP during auto-port
  [SCSI] ibmvfc: IBM Power Virtual Fibre Channel Adapter Client Driver
  [SCSI] sg: Add target reset support
  [SCSI] lib: Add support for the T10 (SCSI) Data Integrity Field CRC
  [SCSI] sd: Move scsi_disk() accessor function to sd.h
  ...
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig                        |   27
-rw-r--r--  drivers/scsi/Makefile                       |    2
-rw-r--r--  drivers/scsi/aacraid/commctrl.c             |   33
-rw-r--r--  drivers/scsi/aacraid/linit.c                |    2
-rw-r--r--  drivers/scsi/device_handler/Kconfig         |   32
-rw-r--r--  drivers/scsi/device_handler/Makefile        |    7
-rw-r--r--  drivers/scsi/device_handler/scsi_dh.c       |  162
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c   |  499
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c |  202
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c  |  691
-rw-r--r--  drivers/scsi/esp_scsi.c                     |   24
-rw-r--r--  drivers/scsi/hosts.c                        |    2
-rw-r--r--  drivers/scsi/ibmvscsi/Makefile              |    1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c              | 3910
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h              |  682
-rw-r--r--  drivers/scsi/iscsi_tcp.c                    |  514
-rw-r--r--  drivers/scsi/iscsi_tcp.h                    |    7
-rw-r--r--  drivers/scsi/libiscsi.c                     | 1359
-rw-r--r--  drivers/scsi/lpfc/lpfc.h                    |   21
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c               |    3
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h               |    3
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c                 |   22
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c                |  181
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c            |  120
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c               |   34
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c          |  145
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c               |  232
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c                |   49
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h            |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c              |   16
-rw-r--r--  drivers/scsi/mesh.c                         |    8
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c               |    8
-rw-r--r--  drivers/scsi/scsi.c                         |    9
-rw-r--r--  drivers/scsi/scsi_debug.c                   |  110
-rw-r--r--  drivers/scsi/scsi_error.c                   |   11
-rw-r--r--  drivers/scsi/scsi_lib.c                     |   35
-rw-r--r--  drivers/scsi/scsi_scan.c                    |    2
-rw-r--r--  drivers/scsi/scsi_sysfs.c                   |    1
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c         |  395
-rw-r--r--  drivers/scsi/sd.c                           |    7
-rw-r--r--  drivers/scsi/sd.h                           |   62
-rw-r--r--  drivers/scsi/sg.c                           |    3
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_misc.h         |    4
43 files changed, 8086 insertions, 1553 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 81ccbd7f9e34..26be540d1dd3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -888,6 +888,25 @@ config SCSI_IBMVSCSIS
 	  To compile this driver as a module, choose M here: the
 	  module will be called ibmvstgt.
 
+config SCSI_IBMVFC
+	tristate "IBM Virtual FC support"
+	depends on PPC_PSERIES && SCSI
+	select SCSI_FC_ATTRS
+	help
+	  This is the IBM POWER Virtual FC Client
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ibmvfc.
+
+config SCSI_IBMVFC_TRACE
+	bool "enable driver internal trace"
+	depends on SCSI_IBMVFC
+	default y
+	help
+	  If you say Y here, the driver will trace all commands issued
+	  to the adapter. Performance impact is minimal. Trace can be
+	  dumped using /sys/class/scsi_host/hostXX/trace.
+
 config SCSI_INITIO
 	tristate "Initio 9100U(W) support"
 	depends on PCI && SCSI
@@ -1738,10 +1757,12 @@ config SCSI_SUNESP
 	select SCSI_SPI_ATTRS
 	help
 	  This is the driver for the Sun ESP SCSI host adapter. The ESP
-	  chipset is present in most SPARC SBUS-based computers.
+	  chipset is present in most SPARC SBUS-based computers and
+	  supports the Emulex family of ESP SCSI chips (esp100, esp100A,
+	  esp236, fas101, fas236) as well as the Qlogic fas366 SCSI chip.
 
 	  To compile this driver as a module, choose M here: the
-	  module will be called esp.
+	  module will be called sun_esp.
 
 config ZFCP
 	tristate "FCP host bus adapter driver for IBM eServer zSeries"
@@ -1771,4 +1792,6 @@ endif # SCSI_LOWLEVEL
 
 source "drivers/scsi/pcmcia/Kconfig"
 
+source "drivers/scsi/device_handler/Kconfig"
+
 endmenu
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6c775e350c98..a8149677de23 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
 obj-$(CONFIG_SCSI_SAS_ATTRS)	+= scsi_transport_sas.o
 obj-$(CONFIG_SCSI_SAS_LIBSAS)	+= libsas/
 obj-$(CONFIG_SCSI_SRP_ATTRS)	+= scsi_transport_srp.o
+obj-$(CONFIG_SCSI_DH)		+= device_handler/
 
 obj-$(CONFIG_ISCSI_TCP)		+= libiscsi.o iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER)	+= libiscsi.o
@@ -118,6 +119,7 @@ obj-$(CONFIG_SCSI_IPR) += ipr.o
 obj-$(CONFIG_SCSI_SRP)		+= libsrp.o
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsi/
+obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
 obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 5fd83deab36c..a7355260cfcf 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -41,6 +41,7 @@
 #include <linux/kthread.h>
 #include <linux/semaphore.h>
 #include <asm/uaccess.h>
+#include <scsi/scsi_host.h>
 
 #include "aacraid.h"
 
@@ -581,6 +582,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		for (i = 0; i < upsg->count; i++) {
 			u64 addr;
 			void* p;
+			if (upsg->sg[i].count >
+			    (dev->adapter_info.options &
+			     AAC_OPT_NEW_COMM) ?
+			      (dev->scsi_host_ptr->max_sectors << 9) :
+			      65536) {
+				rcode = -EINVAL;
+				goto cleanup;
+			}
 			/* Does this really need to be GFP_DMA? */
 			p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
 			if(!p) {
@@ -625,6 +634,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		for (i = 0; i < usg->count; i++) {
 			u64 addr;
 			void* p;
+			if (usg->sg[i].count >
+			    (dev->adapter_info.options &
+			     AAC_OPT_NEW_COMM) ?
+			      (dev->scsi_host_ptr->max_sectors << 9) :
+			      65536) {
+				rcode = -EINVAL;
+				goto cleanup;
+			}
 			/* Does this really need to be GFP_DMA? */
 			p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
 			if(!p) {
@@ -667,6 +684,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		for (i = 0; i < upsg->count; i++) {
 			uintptr_t addr;
 			void* p;
+			if (usg->sg[i].count >
+			    (dev->adapter_info.options &
+			     AAC_OPT_NEW_COMM) ?
+			      (dev->scsi_host_ptr->max_sectors << 9) :
+			      65536) {
+				rcode = -EINVAL;
+				goto cleanup;
+			}
 			/* Does this really need to be GFP_DMA? */
 			p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
 			if(!p) {
@@ -698,6 +723,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		for (i = 0; i < upsg->count; i++) {
 			dma_addr_t addr;
 			void* p;
+			if (upsg->sg[i].count >
+			    (dev->adapter_info.options &
+			     AAC_OPT_NEW_COMM) ?
+			      (dev->scsi_host_ptr->max_sectors << 9) :
+			      65536) {
+				rcode = -EINVAL;
+				goto cleanup;
+			}
 			p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
 			if (!p) {
 				dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
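
One reading note on the four checks added above: in C, `>` binds tighter than `?:`, so `count > (options & AAC_OPT_NEW_COMM) ? A : B` parses as `(count > (options & AAC_OPT_NEW_COMM)) ? A : B`, and it is the ternary's result that ends up as the `if` condition. Below is a small standalone sketch of the size cap these checks appear to intend, with the grouping made explicit; the `AAC_OPT_NEW_COMM` value here is a placeholder for illustration, not taken from this diff.

```c
#include <stdbool.h>
#include <stdint.h>

#define AAC_OPT_NEW_COMM 0x40	/* placeholder flag value, illustrative only */

/*
 * Intended cap, as I read it: adapters with the new comm interface
 * accept up to max_sectors << 9 bytes per SG element; older ones are
 * limited to 64 KiB. The explicit parentheses keep the comparison
 * from being swallowed by the conditional operator.
 */
static bool sg_count_too_big(uint32_t count, uint32_t options,
			     uint32_t max_sectors)
{
	uint32_t limit = (options & AAC_OPT_NEW_COMM)
				? (max_sectors << 9)
				: 65536;

	return count > limit;
}
```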
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 68c140e82673..9aa301c1ed07 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -865,7 +865,7 @@ static ssize_t aac_show_bios_version(struct device *device,
 	return len;
 }
 
-ssize_t aac_show_serial_number(struct device *device,
+static ssize_t aac_show_serial_number(struct device *device,
 			       struct device_attribute *attr, char *buf)
 {
 	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
new file mode 100644
index 000000000000..2adc0f666b68
--- /dev/null
+++ b/drivers/scsi/device_handler/Kconfig
@@ -0,0 +1,32 @@
+#
+# SCSI Device Handler configuration
+#
+
+menuconfig SCSI_DH
+	tristate "SCSI Device Handlers"
+	depends on SCSI
+	default n
+	help
+	  SCSI Device Handlers provide device specific support for
+	  devices utilized in multipath configurations. Say Y here to
+	  select support for specific hardware.
+
+config SCSI_DH_RDAC
+	tristate "LSI RDAC Device Handler"
+	depends on SCSI_DH
+	help
+	  If you have a LSI RDAC select y. Otherwise, say N.
+
+config SCSI_DH_HP_SW
+	tristate "HP/COMPAQ MSA Device Handler"
+	depends on SCSI_DH
+	help
+	  If you have a HP/COMPAQ MSA device that requires START_STOP to
+	  be sent to start it and cannot upgrade the firmware then select y.
+	  Otherwise, say N.
+
+config SCSI_DH_EMC
+	tristate "EMC CLARiiON Device Handler"
+	depends on SCSI_DH
+	help
+	  If you have a EMC CLARiiON select y. Otherwise, say N.
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
new file mode 100644
index 000000000000..35272e93b1c8
--- /dev/null
+++ b/drivers/scsi/device_handler/Makefile
@@ -0,0 +1,7 @@
+#
+# SCSI Device Handler
+#
+obj-$(CONFIG_SCSI_DH)		+= scsi_dh.o
+obj-$(CONFIG_SCSI_DH_RDAC)	+= scsi_dh_rdac.o
+obj-$(CONFIG_SCSI_DH_HP_SW)	+= scsi_dh_hp_sw.o
+obj-$(CONFIG_SCSI_DH_EMC)	+= scsi_dh_emc.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
new file mode 100644
index 000000000000..ab6c21cd9689
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -0,0 +1,162 @@
+/*
+ * SCSI device handler infrastruture.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007
+ *      Authors:
+ *               Chandra Seetharaman <sekharan@us.ibm.com>
+ *               Mike Anderson <andmike@linux.vnet.ibm.com>
+ */
+
+#include <scsi/scsi_dh.h>
+#include "../scsi_priv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(scsi_dh_list);
+
+static struct scsi_device_handler *get_device_handler(const char *name)
+{
+	struct scsi_device_handler *tmp, *found = NULL;
+
+	spin_lock(&list_lock);
+	list_for_each_entry(tmp, &scsi_dh_list, list) {
+		if (!strcmp(tmp->name, name)) {
+			found = tmp;
+			break;
+		}
+	}
+	spin_unlock(&list_lock);
+	return found;
+}
+
+static int scsi_dh_notifier_add(struct device *dev, void *data)
+{
+	struct scsi_device_handler *scsi_dh = data;
+
+	scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
+	return 0;
+}
+
+/*
+ * scsi_register_device_handler - register a device handler personality
+ *      module.
+ * @scsi_dh - device handler to be registered.
+ *
+ * Returns 0 on success, -EBUSY if handler already registered.
+ */
+int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
+{
+	int ret = -EBUSY;
+	struct scsi_device_handler *tmp;
+
+	tmp = get_device_handler(scsi_dh->name);
+	if (tmp)
+		goto done;
+
+	ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
+
+	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
+	spin_lock(&list_lock);
+	list_add(&scsi_dh->list, &scsi_dh_list);
+	spin_unlock(&list_lock);
+
+done:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_register_device_handler);
+
+static int scsi_dh_notifier_remove(struct device *dev, void *data)
+{
+	struct scsi_device_handler *scsi_dh = data;
+
+	scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
+	return 0;
+}
+
+/*
+ * scsi_unregister_device_handler - register a device handler personality
+ *      module.
+ * @scsi_dh - device handler to be unregistered.
+ *
+ * Returns 0 on success, -ENODEV if handler not registered.
+ */
+int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
+{
+	int ret = -ENODEV;
+	struct scsi_device_handler *tmp;
+
+	tmp = get_device_handler(scsi_dh->name);
+	if (!tmp)
+		goto done;
+
+	ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb);
+
+	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
+			 scsi_dh_notifier_remove);
+	spin_lock(&list_lock);
+	list_del(&scsi_dh->list);
+	spin_unlock(&list_lock);
+
+done:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
+
+/*
+ * scsi_dh_activate - activate the path associated with the scsi_device
+ *      corresponding to the given request queue.
+ * @q - Request queue that is associated with the scsi_device to be
+ *      activated.
+ */
+int scsi_dh_activate(struct request_queue *q)
+{
+	int err = 0;
+	unsigned long flags;
+	struct scsi_device *sdev;
+	struct scsi_device_handler *scsi_dh = NULL;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	sdev = q->queuedata;
+	if (sdev && sdev->scsi_dh_data)
+		scsi_dh = sdev->scsi_dh_data->scsi_dh;
+	if (!scsi_dh || !get_device(&sdev->sdev_gendev))
+		err = SCSI_DH_NOSYS;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	if (err)
+		return err;
+
+	if (scsi_dh->activate)
+		err = scsi_dh->activate(sdev);
+	put_device(&sdev->sdev_gendev);
+	return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_activate);
+
+/*
+ * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
+ *      the given name. FALSE(0) otherwise.
+ * @name - name of the device handler.
+ */
+int scsi_dh_handler_exist(const char *name)
+{
+	return (get_device_handler(name) != NULL);
+}
+EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
+
+MODULE_DESCRIPTION("SCSI device handler");
+MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
+MODULE_LICENSE("GPL");
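
To make the registration interface above concrete, here is a minimal sketch of a handler module written against it. It is illustrative only: `example_dh`, `example_activate` and `example_bus_notify` are hypothetical names, and the vendor/model matching plus `scsi_dh_data` allocation that real handlers perform in their notifier callback (see scsi_dh_emc.c below) is reduced to a stub.

```c
#include <linux/module.h>
#include <scsi/scsi_dh.h>
#include <scsi/scsi_device.h>

/* Hypothetical no-op activation callback; a real handler issues its
 * device-specific failover command here and returns SCSI_DH_OK on
 * success. */
static int example_activate(struct scsi_device *sdev)
{
	sdev_printk(KERN_INFO, sdev, "example_dh: activate\n");
	return SCSI_DH_OK;
}

/* Bus notifier stub: real handlers match sdev->vendor/sdev->model on
 * BUS_NOTIFY_ADD_DEVICE, allocate sdev->scsi_dh_data, and free it on
 * BUS_NOTIFY_DEL_DEVICE (compare clariion_bus_notify() below). */
static int example_bus_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	return 0;
}

static struct scsi_device_handler example_dh = {
	.name		= "example_dh",
	.module		= THIS_MODULE,
	.nb.notifier_call = example_bus_notify,
	.activate	= example_activate,
};

static int __init example_init(void)
{
	/* Returns -EBUSY if a handler of the same name is registered. */
	return scsi_register_device_handler(&example_dh);
}

static void __exit example_exit(void)
{
	scsi_unregister_device_handler(&example_dh);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```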
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
new file mode 100644
index 000000000000..ed53f14007a2
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -0,0 +1,499 @@
+/*
+ * Target driver for EMC CLARiiON AX/CX-series hardware.
+ * Based on code from Lars Marowsky-Bree <lmb@suse.de>
+ * and Ed Goggin <egoggin@emc.com>.
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+#include <scsi/scsi_device.h>
+
+#define CLARIION_NAME			"emc_clariion"
+
+#define CLARIION_TRESPASS_PAGE		0x22
+#define CLARIION_BUFFER_SIZE		0x80
+#define CLARIION_TIMEOUT		(60 * HZ)
+#define CLARIION_RETRIES		3
+#define CLARIION_UNBOUND_LU		-1
+
+static unsigned char long_trespass[] = {
+	0, 0, 0, 0,
+	CLARIION_TRESPASS_PAGE,	/* Page code */
+	0x09,			/* Page length - 2 */
+	0x81,			/* Trespass code + Honor reservation bit */
+	0xff, 0xff,		/* Trespass target */
+	0, 0, 0, 0, 0, 0	/* Reserved bytes / unknown */
+};
+
+static unsigned char long_trespass_hr[] = {
+	0, 0, 0, 0,
+	CLARIION_TRESPASS_PAGE,	/* Page code */
+	0x09,			/* Page length - 2 */
+	0x01,			/* Trespass code + Honor reservation bit */
+	0xff, 0xff,		/* Trespass target */
+	0, 0, 0, 0, 0, 0	/* Reserved bytes / unknown */
+};
+
+static unsigned char short_trespass[] = {
+	0, 0, 0, 0,
+	CLARIION_TRESPASS_PAGE,	/* Page code */
+	0x02,			/* Page length - 2 */
+	0x81,			/* Trespass code + Honor reservation bit */
+	0xff,			/* Trespass target */
+};
+
+static unsigned char short_trespass_hr[] = {
+	0, 0, 0, 0,
+	CLARIION_TRESPASS_PAGE,	/* Page code */
+	0x02,			/* Page length - 2 */
+	0x01,			/* Trespass code + Honor reservation bit */
+	0xff,			/* Trespass target */
+};
+
+struct clariion_dh_data {
+	/*
+	 * Use short trespass command (FC-series) or the long version
+	 * (default for AX/CX CLARiiON arrays).
+	 */
+	unsigned short_trespass;
+	/*
+	 * Whether or not (default) to honor SCSI reservations when
+	 * initiating a switch-over.
+	 */
+	unsigned hr;
+	/* I/O buffer for both MODE_SELECT and INQUIRY commands. */
+	char buffer[CLARIION_BUFFER_SIZE];
+	/*
+	 * SCSI sense buffer for commands -- assumes serial issuance
+	 * and completion sequence of all commands for same multipath.
+	 */
+	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+	/* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */
+	int default_sp;
+	/* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */
+	int current_sp;
+};
+
+static inline struct clariion_dh_data
+			*get_clariion_data(struct scsi_device *sdev)
+{
+	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+	BUG_ON(scsi_dh_data == NULL);
+	return ((struct clariion_dh_data *) scsi_dh_data->buf);
+}
+
+/*
+ * Parse MODE_SELECT cmd reply.
+ */
+static int trespass_endio(struct scsi_device *sdev, int result)
+{
+	int err = SCSI_DH_OK;
+	struct scsi_sense_hdr sshdr;
+	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+	char *sense = csdev->sense;
+
+	if (status_byte(result) == CHECK_CONDITION &&
+	    scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
+		sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
+			    "0x%2x, 0x%2x while sending CLARiiON trespass "
+			    "command.\n", sshdr.sense_key, sshdr.asc,
+			    sshdr.ascq);
+
+		if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
+		    (sshdr.ascq == 0x00)) {
+			/*
+			 * Array based copy in progress -- do not send
+			 * mode_select or copy will be aborted mid-stream.
+			 */
+			sdev_printk(KERN_INFO, sdev, "Array Based Copy in "
+				    "progress while sending CLARiiON trespass "
+				    "command.\n");
+			err = SCSI_DH_DEV_TEMP_BUSY;
+		} else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
+			   (sshdr.ascq == 0x03)) {
+			/*
+			 * LUN Not Ready - Manual Intervention Required
+			 * indicates in-progress ucode upgrade (NDU).
+			 */
+			sdev_printk(KERN_INFO, sdev, "Detected in-progress "
+				    "ucode upgrade NDU operation while sending "
+				    "CLARiiON trespass command.\n");
+			err = SCSI_DH_DEV_TEMP_BUSY;
+		} else
+			err = SCSI_DH_DEV_FAILED;
+	} else if (result) {
+		sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending "
+			    "CLARiiON trespass command.\n", result);
+		err = SCSI_DH_IO;
+	}
+
+	return err;
+}
+
+static int parse_sp_info_reply(struct scsi_device *sdev, int result,
+		int *default_sp, int *current_sp, int *new_current_sp)
+{
+	int err = SCSI_DH_OK;
+	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+
+	if (result == 0) {
+		/* check for in-progress ucode upgrade (NDU) */
+		if (csdev->buffer[48] != 0) {
+			sdev_printk(KERN_NOTICE, sdev, "Detected in-progress "
+				    "ucode upgrade NDU operation while finding "
+				    "current active SP.");
+			err = SCSI_DH_DEV_TEMP_BUSY;
+		} else {
+			*default_sp = csdev->buffer[5];
+
+			if (csdev->buffer[4] == 2)
+				/* SP for path is current */
+				*current_sp = csdev->buffer[8];
+			else {
+				if (csdev->buffer[4] == 1)
+					/* SP for this path is NOT current */
+					if (csdev->buffer[8] == 0)
+						*current_sp = 1;
+					else
+						*current_sp = 0;
+				else
+					/* unbound LU or LUNZ */
+					*current_sp = CLARIION_UNBOUND_LU;
+			}
+			*new_current_sp = csdev->buffer[8];
+		}
+	} else {
+		struct scsi_sense_hdr sshdr;
+
+		err = SCSI_DH_IO;
+
+		if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
+					 &sshdr))
+			sdev_printk(KERN_ERR, sdev, "Found valid sense data "
+				    "0x%2x, 0x%2x, 0x%2x while finding current "
+				    "active SP.", sshdr.sense_key, sshdr.asc,
+				    sshdr.ascq);
+		else
+			sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
+				    "current active SP.", result);
+	}
+
+	return err;
+}
+
+static int sp_info_endio(struct scsi_device *sdev, int result,
+					int mode_select_sent, int *done)
+{
+	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+	int err_flags, default_sp, current_sp, new_current_sp;
+
+	err_flags = parse_sp_info_reply(sdev, result, &default_sp,
+					&current_sp, &new_current_sp);
+
+	if (err_flags != SCSI_DH_OK)
+		goto done;
+
+	if (mode_select_sent) {
+		csdev->default_sp = default_sp;
+		csdev->current_sp = current_sp;
+	} else {
+		/*
+		 * Issue the actual module_selec request IFF either
+		 * (1) we do not know the identity of the current SP OR
+		 * (2) what we think we know is actually correct.
+		 */
+		if ((current_sp != CLARIION_UNBOUND_LU) &&
+		    (new_current_sp != current_sp)) {
+
+			csdev->default_sp = default_sp;
+			csdev->current_sp = current_sp;
+
+			sdev_printk(KERN_INFO, sdev, "Ignoring path group "
+				    "switch-over command for CLARiiON SP%s since "
+				    " mapped device is already initialized.",
+				    current_sp ? "B" : "A");
+			if (done)
+				*done = 1; /* as good as doing it */
+		}
+	}
+done:
+	return err_flags;
+}
+
+/*
+* Get block request for REQ_BLOCK_PC command issued to path. Currently
+* limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
+*
+* Uses data and sense buffers in hardware handler context structure and
+* assumes serial servicing of commands, both issuance and completion.
+*/
+static struct request *get_req(struct scsi_device *sdev, int cmd)
+{
+	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+	struct request *rq;
+	unsigned char *page22;
+	int len = 0;
+
+	rq = blk_get_request(sdev->request_queue,
+			(cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
+	if (!rq) {
+		sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
+		return NULL;
+	}
+
+	memset(&rq->cmd, 0, BLK_MAX_CDB);
+	rq->cmd[0] = cmd;
+	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+	switch (cmd) {
+	case MODE_SELECT:
+		if (csdev->short_trespass) {
+			page22 = csdev->hr ? short_trespass_hr : short_trespass;
+			len = sizeof(short_trespass);
+		} else {
+			page22 = csdev->hr ? long_trespass_hr : long_trespass;
+			len = sizeof(long_trespass);
+		}
+		/*
+		 * Can't DMA from kernel BSS -- must copy selected trespass
+		 * command mode page contents to context buffer which is
+		 * allocated by kmalloc.
+		 */
+		BUG_ON((len > CLARIION_BUFFER_SIZE));
+		memcpy(csdev->buffer, page22, len);
+		rq->cmd_flags |= REQ_RW;
+		rq->cmd[1] = 0x10;
+		break;
+	case INQUIRY:
+		rq->cmd[1] = 0x1;
+		rq->cmd[2] = 0xC0;
+		len = CLARIION_BUFFER_SIZE;
+		memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
+
+	rq->cmd[4] = len;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_flags |= REQ_FAILFAST;
+	rq->timeout = CLARIION_TIMEOUT;
+	rq->retries = CLARIION_RETRIES;
+
+	rq->sense = csdev->sense;
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = 0;
+
+	if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
+			    len, GFP_ATOMIC)) {
+		__blk_put_request(rq->q, rq);
+		return NULL;
+	}
+
+	return rq;
+}
+
+static int send_cmd(struct scsi_device *sdev, int cmd)
+{
+	struct request *rq = get_req(sdev, cmd);
+
+	if (!rq)
+		return SCSI_DH_RES_TEMP_UNAVAIL;
+
+	return blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+}
+
+static int clariion_activate(struct scsi_device *sdev)
+{
+	int result, done = 0;
+
+	result = send_cmd(sdev, INQUIRY);
+	result = sp_info_endio(sdev, result, 0, &done);
+	if (result || done)
+		goto done;
+
+	result = send_cmd(sdev, MODE_SELECT);
+	result = trespass_endio(sdev, result);
+	if (result)
+		goto done;
+
+	result = send_cmd(sdev, INQUIRY);
+	result = sp_info_endio(sdev, result, 1, NULL);
+done:
+	return result;
+}
+
+static int clariion_check_sense(struct scsi_device *sdev,
+				struct scsi_sense_hdr *sense_hdr)
+{
+	switch (sense_hdr->sense_key) {
+	case NOT_READY:
+		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03)
+			/*
+			 * LUN Not Ready - Manual Intervention Required
+			 * indicates this is a passive path.
+			 *
+			 * FIXME: However, if this is seen and EVPD C0
+			 * indicates that this is due to a NDU in
+			 * progress, we should set FAIL_PATH too.
+			 * This indicates we might have to do a SCSI
+			 * inquiry in the end_io path. Ugh.
+			 *
+			 * Can return FAILED only when we want the error
+			 * recovery process to kick in.
+			 */
+			return SUCCESS;
+		break;
+	case ILLEGAL_REQUEST:
+		if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01)
+			/*
+			 * An array based copy is in progress. Do not
+			 * fail the path, do not bypass to another PG,
+			 * do not retry. Fail the IO immediately.
+			 * (Actually this is the same conclusion as in
+			 * the default handler, but lets make sure.)
+			 *
+			 * Can return FAILED only when we want the error
+			 * recovery process to kick in.
+			 */
+			return SUCCESS;
+		break;
+	case UNIT_ATTENTION:
+		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+			/*
+			 * Unit Attention Code. This is the first IO
+			 * to the new path, so just retry.
+			 */
+			return NEEDS_RETRY;
+		break;
+	}
+
+	/* success just means we do not care what scsi-ml does */
+	return SUCCESS;
+}
+
+static const struct {
+	char *vendor;
+	char *model;
+} clariion_dev_list[] = {
+	{"DGC", "RAID"},
+	{"DGC", "DISK"},
+	{NULL, NULL},
+};
+
+static int clariion_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler clariion_dh = {
+	.name		= CLARIION_NAME,
+	.module		= THIS_MODULE,
+	.nb.notifier_call = clariion_bus_notify,
+	.check_sense	= clariion_check_sense,
+	.activate	= clariion_activate,
+};
+
+/*
+ * TODO: need some interface so we can set trespass values
+ */
+static int clariion_bus_notify(struct notifier_block *nb,
+				unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_dh_data *scsi_dh_data;
+	struct clariion_dh_data *h;
+	int i, found = 0;
+	unsigned long flags;
+
+	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		for (i = 0; clariion_dev_list[i].vendor; i++) {
+			if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+				     strlen(clariion_dev_list[i].vendor)) &&
+			    !strncmp(sdev->model, clariion_dev_list[i].model,
+				     strlen(clariion_dev_list[i].model))) {
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			goto out;
+
+		scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+				+ sizeof(*h) , GFP_KERNEL);
+		if (!scsi_dh_data) {
+			sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
+				    CLARIION_NAME);
+			goto out;
+		}
+
+		scsi_dh_data->scsi_dh = &clariion_dh;
+		h = (struct clariion_dh_data *) scsi_dh_data->buf;
+		h->default_sp = CLARIION_UNBOUND_LU;
+		h->current_sp = CLARIION_UNBOUND_LU;
+
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		sdev->scsi_dh_data = scsi_dh_data;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+		sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME);
+		try_module_get(THIS_MODULE);
+
+	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
+		if (sdev->scsi_dh_data == NULL ||
+		    sdev->scsi_dh_data->scsi_dh != &clariion_dh)
+			goto out;
+
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		scsi_dh_data = sdev->scsi_dh_data;
+		sdev->scsi_dh_data = NULL;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+		sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n",
+			    CLARIION_NAME);
+
+		kfree(scsi_dh_data);
+		module_put(THIS_MODULE);
+	}
+
+out:
+	return 0;
+}
+
+static int __init clariion_init(void)
+{
+	int r;
+
+	r = scsi_register_device_handler(&clariion_dh);
+	if (r != 0)
+		printk(KERN_ERR "Failed to register scsi device handler.");
+	return r;
+}
+
+static void __exit clariion_exit(void)
+{
+	scsi_unregister_device_handler(&clariion_dh);
+}
+
+module_init(clariion_init);
+module_exit(clariion_exit);
+
+MODULE_DESCRIPTION("EMC CX/AX/FC-family driver");
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, Chandra Seetharaman <sekharan@us.ibm.com>");
+MODULE_LICENSE("GPL");
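
For completeness, the consumer side: the only entry points scsi_dh.c exports to users of these handlers are scsi_dh_activate(), keyed off a path's request queue, and scsi_dh_handler_exist(). Below is a hedged sketch of how a multipath layer might call them; `activate_path` is a hypothetical function, and the actual dm-multipath wiring is not part of this diff.

```c
#include <scsi/scsi_dh.h>

/*
 * Hypothetical path-activation step in a multipath layer. q is the
 * request_queue of the path's scsi_device. scsi_dh_activate() returns
 * SCSI_DH_NOSYS when no handler is attached to the device, otherwise
 * the handler's ->activate() result (SCSI_DH_OK, SCSI_DH_RETRY, ...).
 */
static int activate_path(struct request_queue *q)
{
	/* in a real consumer this would be checked once, at setup time */
	if (!scsi_dh_handler_exist("emc_clariion"))
		return SCSI_DH_NOSYS;

	return scsi_dh_activate(q);
}
```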
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
new file mode 100644
index 000000000000..12ceab7b3662
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -0,0 +1,202 @@
+/*
+ * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be
+ * upgraded.
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define HP_SW_NAME	"hp_sw"
+
+#define HP_SW_TIMEOUT	(60 * HZ)
+#define HP_SW_RETRIES	3
+
+struct hp_sw_dh_data {
+	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+	int retries;
+};
+
+static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
+{
+	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+	BUG_ON(scsi_dh_data == NULL);
+	return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
+}
+
+static int hp_sw_done(struct scsi_device *sdev)
+{
+	struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+	struct scsi_sense_hdr sshdr;
+	int rc;
+
+	sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
+
+	rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+	if (!rc)
+		goto done;
+	switch (sshdr.sense_key) {
+	case NOT_READY:
+		if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
+			rc = SCSI_DH_RETRY;
+			h->retries++;
+			break;
+		}
+		/* fall through */
+	default:
+		h->retries++;
+		rc = SCSI_DH_IMM_RETRY;
+	}
+
+done:
+	if (rc == SCSI_DH_OK || rc == SCSI_DH_IO)
+		h->retries = 0;
+	else if (h->retries > HP_SW_RETRIES) {
+		h->retries = 0;
+		rc = SCSI_DH_IO;
+	}
+	return rc;
+}
+
+static int hp_sw_activate(struct scsi_device *sdev)
+{
+	struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+	struct request *req;
+	int ret = SCSI_DH_RES_TEMP_UNAVAIL;
+
+	req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
+	if (!req)
+		goto done;
+
+	sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
+
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	req->cmd_flags |= REQ_FAILFAST;
+	req->cmd_len = COMMAND_SIZE(START_STOP);
+	memset(req->cmd, 0, MAX_COMMAND_SIZE);
+	req->cmd[0] = START_STOP;
+	req->cmd[4] = 1;	/* Start spin cycle */
+	req->timeout = HP_SW_TIMEOUT;
+	req->sense = h->sense;
+	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	req->sense_len = 0;
+
+	ret = blk_execute_rq(req->q, NULL, req, 1);
+	if (!ret) /* SUCCESS */
+		ret = hp_sw_done(sdev);
+	else
+		ret = SCSI_DH_IO;
+done:
+	return ret;
+}
+
+static const struct {
+	char *vendor;
+	char *model;
+} hp_sw_dh_data_list[] = {
+	{"COMPAQ", "MSA"},
+	{"HP", "HSV"},
+	{"DEC", "HSG80"},
+	{NULL, NULL},
+};
+
+static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler hp_sw_dh = {
+	.name		= HP_SW_NAME,
+	.module		= THIS_MODULE,
+	.nb.notifier_call = hp_sw_bus_notify,
+	.activate	= hp_sw_activate,
+};
+
+static int hp_sw_bus_notify(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_dh_data *scsi_dh_data;
+	int i, found = 0;
+	unsigned long flags;
+
+	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+			if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+				     strlen(hp_sw_dh_data_list[i].vendor)) &&
+			    !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+				     strlen(hp_sw_dh_data_list[i].model))) {
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			goto out;
+
+		scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+				+ sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
+		if (!scsi_dh_data) {
+			sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
+				    HP_SW_NAME);
+			goto out;
+		}
+
+		scsi_dh_data->scsi_dh = &hp_sw_dh;
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		sdev->scsi_dh_data = scsi_dh_data;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+		try_module_get(THIS_MODULE);
+
+		sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME);
+	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
+		if (sdev->scsi_dh_data == NULL ||
+		    sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
+			goto out;
+
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		scsi_dh_data = sdev->scsi_dh_data;
+		sdev->scsi_dh_data = NULL;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+		module_put(THIS_MODULE);
+
+		sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME);
+
+		kfree(scsi_dh_data);
+	}
+
+out:
+	return 0;
+}
+
+static int __init hp_sw_init(void)
+{
+	return scsi_register_device_handler(&hp_sw_dh);
+}
+
+static void __exit hp_sw_exit(void)
+{
+	scsi_unregister_device_handler(&hp_sw_dh);
+}
+
+module_init(hp_sw_init);
+module_exit(hp_sw_exit);
+
+MODULE_DESCRIPTION("HP MSA 1000");
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
+MODULE_LICENSE("GPL");
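
The activation command hp_sw_activate() builds above is a plain 6-byte START STOP UNIT CDB: the opcode in byte 0, the Start bit set in byte 4, everything else zero. Here is the same construction in isolation, as a sketch; the buffer name is illustrative.

```c
#include <string.h>

#define START_STOP	0x1b	/* SCSI START STOP UNIT opcode */

/*
 * Build the CDB exactly as hp_sw_activate() does: COMMAND_SIZE(START_STOP)
 * is 6, byte 4 carries the START bit (bit 0) to spin the unit up, and the
 * LOEJ bit stays clear so no media is ejected.
 */
static void build_start_unit_cdb(unsigned char cdb[6])
{
	memset(cdb, 0, 6);
	cdb[0] = START_STOP;
	cdb[4] = 1;		/* START=1, LOEJ=0 */
}
```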
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
new file mode 100644
index 000000000000..6fff077a888d
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -0,0 +1,691 @@
+/*
+ * Engenio/LSI RDAC SCSI Device Handler
+ *
+ * Copyright (C) 2005 Mike Christie. All rights reserved.
+ * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define RDAC_NAME "rdac"
+
+/*
+ * LSI mode page stuff
+ *
+ * These struct definitions and the forming of the
+ * mode page were taken from the LSI RDAC 2.4 GPL'd
+ * driver, and then converted to Linux conventions.
+ */
+#define RDAC_QUIESCENCE_TIME 20;
+/*
+ * Page Codes
+ */
+#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
+
+/*
+ * Controller modes definitions
+ */
+#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02
+
+/*
+ * RDAC Options field
+ */
+#define RDAC_FORCED_QUIESENCE 0x02
+
+#define RDAC_TIMEOUT	(60 * HZ)
+#define RDAC_RETRIES	3
+
+struct rdac_mode_6_hdr {
+	u8	data_len;
+	u8	medium_type;
+	u8	device_params;
+	u8	block_desc_len;
+};
+
+struct rdac_mode_10_hdr {
+	u16	data_len;
+	u8	medium_type;
+	u8	device_params;
+	u16	reserved;
+	u16	block_desc_len;
+};
+
+struct rdac_mode_common {
+	u8	controller_serial[16];
+	u8	alt_controller_serial[16];
+	u8	rdac_mode[2];
+	u8	alt_rdac_mode[2];
+	u8	quiescence_timeout;
+	u8	rdac_options;
+};
+
+struct rdac_pg_legacy {
+	struct rdac_mode_6_hdr hdr;
+	u8	page_code;
+	u8	page_len;
+	struct rdac_mode_common common;
+#define MODE6_MAX_LUN	32
+	u8	lun_table[MODE6_MAX_LUN];
+	u8	reserved2[32];
+	u8	reserved3;
+	u8	reserved4;
+};
+
+struct rdac_pg_expanded {
+	struct rdac_mode_10_hdr hdr;
+	u8	page_code;
+	u8	subpage_code;
+	u8	page_len[2];
+	struct rdac_mode_common common;
+	u8	lun_table[256];
+	u8	reserved3;
+	u8	reserved4;
+};
+
+struct c9_inquiry {
+	u8	peripheral_info;
+	u8	page_code;	/* 0xC9 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4];	/* "vace" */
+	u8	avte_cvp;
+	u8	path_prio;
+	u8	reserved2[38];
+};
+
+#define SUBSYS_ID_LEN	16
+#define SLOT_ID_LEN	2
+
+struct c4_inquiry {
+	u8	peripheral_info;
+	u8	page_code;	/* 0xC4 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4];	/* "subs" */
+	u8	subsys_id[SUBSYS_ID_LEN];
+	u8	revision[4];
+	u8	slot_id[SLOT_ID_LEN];
+	u8	reserved[2];
+};
+
+struct rdac_controller {
+	u8			subsys_id[SUBSYS_ID_LEN];
+	u8			slot_id[SLOT_ID_LEN];
+	int			use_ms10;
+	struct kref		kref;
+	struct list_head	node; /* list of all controllers */
+	union			{
+		struct rdac_pg_legacy legacy;
+		struct rdac_pg_expanded expanded;
+	} mode_select;
+};
+struct c8_inquiry {
+	u8	peripheral_info;
+	u8	page_code; /* 0xC8 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4]; /* "edid" */
+	u8	reserved2[3];
+	u8	vol_uniq_id_len;
+	u8	vol_uniq_id[16];
+	u8	vol_user_label_len;
+	u8	vol_user_label[60];
+	u8	array_uniq_id_len;
+	u8	array_unique_id[16];
+	u8	array_user_label_len;
+	u8	array_user_label[60];
+	u8	lun[8];
+};
+
+struct c2_inquiry {
+	u8	peripheral_info;
+	u8	page_code;	/* 0xC2 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4];	/* "swr4" */
+	u8	sw_version[3];
+	u8	sw_date[3];
+	u8	features_enabled;
+	u8	max_lun_supported;
+	u8	partitions[239]; /* Total allocation length should be 0xFF */
+};
+
+struct rdac_dh_data {
+	struct rdac_controller	*ctlr;
+#define UNINITIALIZED_LUN	(1 << 8)
+	unsigned		lun;
+#define RDAC_STATE_ACTIVE	0
+#define RDAC_STATE_PASSIVE	1
+	unsigned char		state;
+	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
+	union			{
+		struct c2_inquiry c2;
+		struct c4_inquiry c4;
+		struct c8_inquiry c8;
+		struct c9_inquiry c9;
+	} inq;
+};
+
+static LIST_HEAD(ctlr_list);
+static DEFINE_SPINLOCK(list_lock);
+
+static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
+{
+	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+	BUG_ON(scsi_dh_data == NULL);
+	return ((struct rdac_dh_data *) scsi_dh_data->buf);
+}
+
+static struct request *get_rdac_req(struct scsi_device *sdev,
+			void *buffer, unsigned buflen, int rw)
+{
+	struct request *rq;
+	struct request_queue *q = sdev->request_queue;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	rq = blk_get_request(q, rw, GFP_KERNEL);
+
+	if (!rq) {
+		sdev_printk(KERN_INFO, sdev,
+				"get_rdac_req: blk_get_request failed.\n");
+		return NULL;
+	}
+
+	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
+		blk_put_request(rq);
+		sdev_printk(KERN_INFO, sdev,
+				"get_rdac_req: blk_rq_map_kern failed.\n");
+		return NULL;
+	}
+
+	memset(&rq->cmd, 0, BLK_MAX_CDB);
+	rq->sense = h->sense;
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = 0;
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+	rq->retries = RDAC_RETRIES;
+	rq->timeout = RDAC_TIMEOUT;
+
+	return rq;
+}
+
+static struct request *rdac_failover_get(struct scsi_device *sdev)
+{
+	struct request *rq;
+	struct rdac_mode_common *common;
+	unsigned data_size;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	if (h->ctlr->use_ms10) {
+		struct rdac_pg_expanded *rdac_pg;
+
+		data_size = sizeof(struct rdac_pg_expanded);
+		rdac_pg = &h->ctlr->mode_select.expanded;
+		memset(rdac_pg, 0, data_size);
+		common = &rdac_pg->common;
+		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
+		rdac_pg->subpage_code = 0x1;
+		rdac_pg->page_len[0] = 0x01;
+		rdac_pg->page_len[1] = 0x28;
+		rdac_pg->lun_table[h->lun] = 0x81;
+	} else {
+		struct rdac_pg_legacy *rdac_pg;
+
+		data_size = sizeof(struct rdac_pg_legacy);
+		rdac_pg = &h->ctlr->mode_select.legacy;
+		memset(rdac_pg, 0, data_size);
+		common = &rdac_pg->common;
+		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
+		rdac_pg->page_len = 0x68;
+		rdac_pg->lun_table[h->lun] = 0x81;
+	}
+	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
+	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
+	common->rdac_options = RDAC_FORCED_QUIESENCE;
+
+	/* get request for block layer packet command */
+	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
+	if (!rq)
+		return NULL;
+
+	/* Prepare the command. */
+	if (h->ctlr->use_ms10) {
+		rq->cmd[0] = MODE_SELECT_10;
+		rq->cmd[7] = data_size >> 8;
+		rq->cmd[8] = data_size & 0xff;
+	} else {
+		rq->cmd[0] = MODE_SELECT;
+		rq->cmd[4] = data_size;
+	}
+	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+	return rq;
+}
+
+static void release_controller(struct kref *kref)
+{
+	struct rdac_controller *ctlr;
+	ctlr = container_of(kref, struct rdac_controller, kref);
+
+	spin_lock(&list_lock);
+	list_del(&ctlr->node);
+	spin_unlock(&list_lock);
+	kfree(ctlr);
+}
+
+static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
+{
+	struct rdac_controller *ctlr, *tmp;
+
+	spin_lock(&list_lock);
+
+	list_for_each_entry(tmp, &ctlr_list, node) {
+		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
+		    (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
+			kref_get(&tmp->kref);
+			spin_unlock(&list_lock);
+			return tmp;
+		}
+	}
+	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
+	if (!ctlr)
+		goto done;
+
+	/* initialize fields of controller */
+	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
+	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
+	kref_init(&ctlr->kref);
+	ctlr->use_ms10 = -1;
+	list_add(&ctlr->node, &ctlr_list);
+done:
+	spin_unlock(&list_lock);
+	return ctlr;
+}
+
+static int submit_inquiry(struct scsi_device *sdev, int page_code,
+			  unsigned int len)
+{
+	struct request *rq;
+	struct request_queue *q = sdev->request_queue;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+	rq = get_rdac_req(sdev, &h->inq, len, READ);
+	if (!rq)
+		goto done;
+
+	/* Prepare the command. */
+	rq->cmd[0] = INQUIRY;
+	rq->cmd[1] = 1;
+	rq->cmd[2] = page_code;
+	rq->cmd[4] = len;
+	rq->cmd_len = COMMAND_SIZE(INQUIRY);
+	err = blk_execute_rq(q, NULL, rq, 1);
+	if (err == -EIO)
+		err = SCSI_DH_IO;
+done:
+	return err;
+}
+
+static int get_lun(struct scsi_device *sdev)
+{
+	int err;
+	struct c8_inquiry *inqp;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry));
+	if (err == SCSI_DH_OK) {
+		inqp = &h->inq.c8;
+		h->lun = inqp->lun[7]; /* currently it uses only one byte */
+	}
+	return err;
+}
+
+#define RDAC_OWNED	0
+#define RDAC_UNOWNED	1
+#define RDAC_FAILED	2
+static int check_ownership(struct scsi_device *sdev)
+{
+	int err;
+	struct c9_inquiry *inqp;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry));
+	if (err == SCSI_DH_OK) {
+		err = RDAC_UNOWNED;
+		inqp = &h->inq.c9;
+		/*
+		 * If in AVT mode or if the path already owns the LUN,
+		 * return RDAC_OWNED;
+		 */
+		if (((inqp->avte_cvp >> 7) == 0x1) ||
+		    ((inqp->avte_cvp & 0x1) != 0))
+			err = RDAC_OWNED;
+	} else
+		err = RDAC_FAILED;
+	return err;
+}
+
+static int initialize_controller(struct scsi_device *sdev)
+{
+	int err;
+	struct c4_inquiry *inqp;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry));
+	if (err == SCSI_DH_OK) {
+		inqp = &h->inq.c4;
+		h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
+		if (!h->ctlr)
+			err = SCSI_DH_RES_TEMP_UNAVAIL;
+	}
+	return err;
+}
+
+static int set_mode_select(struct scsi_device *sdev)
+{
+	int err;
+	struct c2_inquiry *inqp;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry));
+	if (err == SCSI_DH_OK) {
+		inqp = &h->inq.c2;
+		/*
+		 * If more than MODE6_MAX_LUN luns are supported, use
+		 * mode select 10
+		 */
+		if (inqp->max_lun_supported >= MODE6_MAX_LUN)
+			h->ctlr->use_ms10 = 1;
+		else
+			h->ctlr->use_ms10 = 0;
+	}
+	return err;
+}
+
+static int mode_select_handle_sense(struct scsi_device *sdev)
+{
+	struct scsi_sense_hdr sense_hdr;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	int sense, err = SCSI_DH_IO, ret;
+
+	ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+	if (!ret)
+		goto done;
+
+	err = SCSI_DH_OK;
+	sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
+			sense_hdr.ascq;
+	/* If it is retryable failure, submit the c9 inquiry again */
+	if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
+	    sense == 0x62900) {
+		/* 0x59136    - Command lock contention
+		 * 0x[6b]8b02 - Quiesense in progress or achieved
+		 * 0x62900    - Power On, Reset, or Bus Device Reset
+		 */
+		err = SCSI_DH_RETRY;
+	}
+
+	if (sense)
+		sdev_printk(KERN_INFO, sdev,
+			    "MODE_SELECT failed with sense 0x%x.\n", sense);
+done:
+	return err;
+}
+
+static int send_mode_select(struct scsi_device *sdev)
+{
+	struct request *rq;
+	struct request_queue *q = sdev->request_queue;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+	rq = rdac_failover_get(sdev);
+	if (!rq)
+		goto done;
+
+	sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n");
+
+	err = blk_execute_rq(q, NULL, rq, 1);
+	if (err != SCSI_DH_OK)
+		err = mode_select_handle_sense(sdev);
+	if (err == SCSI_DH_OK)
+		h->state = RDAC_STATE_ACTIVE;
+done:
+	return err;
+}
+
+static int rdac_activate(struct scsi_device *sdev)
+{
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	int err = SCSI_DH_OK;
+
+	if (h->lun == UNINITIALIZED_LUN) {
+		err = get_lun(sdev);
+		if (err != SCSI_DH_OK)
+			goto done;
+	}
+
+	err = check_ownership(sdev);
+	switch (err) {
+	case RDAC_UNOWNED:
+		break;
+	case RDAC_OWNED:
+		err = SCSI_DH_OK;
+		goto done;
+	case RDAC_FAILED:
+	default:
+		err = SCSI_DH_IO;
+		goto done;
+	}
+
+	if (!h->ctlr) {
+		err = initialize_controller(sdev);
+		if (err != SCSI_DH_OK)
+			goto done;
+	}
+
+	if (h->ctlr->use_ms10 == -1) {
+		err = set_mode_select(sdev);
+		if (err != SCSI_DH_OK)
+			goto done;
+	}
+
+	err = send_mode_select(sdev);
+done:
+	return err;
+}
+
+static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	int ret = BLKPREP_OK;
+
+	if (h->state != RDAC_STATE_ACTIVE) {
+		ret = BLKPREP_KILL;
+		req->cmd_flags |= REQ_QUIET;
+	}
+	return ret;
+
+}
+
+static int rdac_check_sense(struct scsi_device *sdev,
+				struct scsi_sense_hdr *sense_hdr)
+{
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	switch (sense_hdr->sense_key) {
+	case NOT_READY:
+		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
+			/* LUN Not Ready - Storage firmware incompatible
+			 * Manual code synchonisation required.
+			 *
+			 * Nothing we can do here. Try to bypass the path.
+			 */
+			return SUCCESS;
+		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
+			/* LUN Not Ready - Quiescense in progress
+			 *
+			 * Just retry and wait.
+			 */
+			return NEEDS_RETRY;
+		break;
+	case ILLEGAL_REQUEST:
+		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
+			/* Invalid Request - Current Logical Unit Ownership.
+			 * Controller is not the current owner of the LUN,
+			 * Fail the path, so that the other path be used.
+			 */
+			h->state = RDAC_STATE_PASSIVE;
+			return SUCCESS;
+		}
+		break;
+	case UNIT_ATTENTION:
+		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+			/*
+			 * Power On, Reset, or Bus Device Reset, just retry.
+			 */
+			return NEEDS_RETRY;
+		break;
+	}
+	/* success just means we do not care what scsi-ml does */
+	return SCSI_RETURN_NOT_HANDLED;
+}
+
+static const struct {
+	char *vendor;
+	char *model;
+} rdac_dev_list[] = {
+	{"IBM", "1722"},
+	{"IBM", "1724"},
+	{"IBM", "1726"},
+	{"IBM", "1742"},
+	{"IBM", "1814"},
+	{"IBM", "1815"},
+	{"IBM", "1818"},
+	{"IBM", "3526"},
+	{"SGI", "TP9400"},
+	{"SGI", "TP9500"},
+	{"SGI", "IS"},
+	{"STK", "OPENstorage D280"},
+	{"SUN", "CSM200_R"},
+	{"SUN", "LCSM100_F"},
+	{NULL, NULL},
+};
+
+static int rdac_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler rdac_dh = {
+	.name = RDAC_NAME,
+	.module = THIS_MODULE,
+	.nb.notifier_call = rdac_bus_notify,
+	.prep_fn = rdac_prep_fn,
+	.check_sense = rdac_check_sense,
+	.activate = rdac_activate,
+};
+
+/*
+ * TODO: need some interface so we can set trespass values
+ */
+static int rdac_bus_notify(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_dh_data *scsi_dh_data;
+	struct rdac_dh_data *h;
+	int i, found = 0;
+	unsigned long flags;
+
+	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		for (i = 0; rdac_dev_list[i].vendor; i++) {
+			if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+				     strlen(rdac_dev_list[i].vendor)) &&
+			    !strncmp(sdev->model, rdac_dev_list[i].model,
+				     strlen(rdac_dev_list[i].model))) {
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			goto out;
+
+		scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+				+ sizeof(*h) , GFP_KERNEL);
+		if (!scsi_dh_data) {
+			sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
+				    RDAC_NAME);
+			goto out;
+		}
+
+		scsi_dh_data->scsi_dh = &rdac_dh;
+		h = (struct rdac_dh_data *) scsi_dh_data->buf;
+		h->lun = UNINITIALIZED_LUN;
+		h->state = RDAC_STATE_ACTIVE;
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		sdev->scsi_dh_data = scsi_dh_data;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+		try_module_get(THIS_MODULE);
+
+		sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME);
+
+	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
+		if (sdev->scsi_dh_data == NULL ||
+		    sdev->scsi_dh_data->scsi_dh != &rdac_dh)
+			goto out;
+
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		scsi_dh_data = sdev->scsi_dh_data;
+		sdev->scsi_dh_data = NULL;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+		h = (struct rdac_dh_data *) scsi_dh_data->buf;
+		if (h->ctlr)
+			kref_put(&h->ctlr->kref, release_controller);
+		kfree(scsi_dh_data);
+		module_put(THIS_MODULE);
+		sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME);
+	}
+
+out:
+	return 0;
+}
+
+static int __init rdac_init(void)
+{
+	int r;
+
+	r = scsi_register_device_handler(&rdac_dh);
+	if (r != 0)
+		printk(KERN_ERR "Failed to register scsi device handler.");
+	return r;
+}
+
+static void __exit rdac_exit(void)
+{
+	scsi_unregister_device_handler(&rdac_dh);
+}
+
+module_init(rdac_init);
+module_exit(rdac_exit);
+
+MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
+MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_LICENSE("GPL");
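
A worked example of the sense encoding used in mode_select_handle_sense() above: the handler packs key/ASC/ASCQ as (key << 16) | (asc << 8) | ascq, so the retryable value 0x62900 decodes to key 0x06 (UNIT ATTENTION) with ASC/ASCQ 0x29/0x00, i.e. Power On, Reset, or Bus Device Reset. A standalone sketch of the packing:

```c
#include <stdio.h>

/* Pack sense key/ASC/ASCQ the way mode_select_handle_sense() does. */
static unsigned int pack_sense(unsigned char key, unsigned char asc,
			       unsigned char ascq)
{
	return (key << 16) | (asc << 8) | ascq;
}

int main(void)
{
	/* UNIT ATTENTION / Power On, Reset, or Bus Device Reset */
	unsigned int por = pack_sense(0x06, 0x29, 0x00);
	/* ILLEGAL REQUEST / vendor-specific "command lock contention" */
	unsigned int lock = pack_sense(0x05, 0x91, 0x36);

	printf("0x%x 0x%x\n", por, lock);	/* prints 0x62900 0x59136 */
	return 0;
}
```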
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 59fbef08d690..62a4618530d0 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -219,19 +219,10 @@ static void esp_reset_esp(struct esp *esp)
 	/* Now reset the ESP chip */
 	scsi_esp_cmd(esp, ESP_CMD_RC);
 	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
+	if (esp->rev == FAST)
+		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
 	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
 
-	/* Reload the configuration registers */
-	esp_write8(esp->cfact, ESP_CFACT);
-
-	esp->prev_stp = 0;
-	esp_write8(esp->prev_stp, ESP_STP);
-
-	esp->prev_soff = 0;
-	esp_write8(esp->prev_soff, ESP_SOFF);
-
-	esp_write8(esp->neg_defp, ESP_TIMEO);
-
 	/* This is the only point at which it is reliable to read
 	 * the ID-code for a fast ESP chip variants.
 	 */
@@ -316,6 +307,17 @@ static void esp_reset_esp(struct esp *esp)
 		break;
 	}
 
+	/* Reload the configuration registers */
+	esp_write8(esp->cfact, ESP_CFACT);
+
+	esp->prev_stp = 0;
+	esp_write8(esp->prev_stp, ESP_STP);
+
+	esp->prev_soff = 0;
+	esp_write8(esp->prev_soff, ESP_SOFF);
+
+	esp_write8(esp->neg_defp, ESP_TIMEO);
+
 	/* Eat any bitrot in the chip */
 	esp_read8(ESP_INTRPT);
 	udelay(100);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index c6457bfc8a49..35cd892dce04 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -290,7 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
 	kfree(shost);
 }
 
-struct device_type scsi_host_type = {
+static struct device_type scsi_host_type = {
 	.name = "scsi_host",
 	.release = scsi_host_dev_release,
 };
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index 6ac0633d5452..a423d9633625 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -5,3 +5,4 @@ ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
 ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
 
 obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
+obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
new file mode 100644
index 000000000000..eb702b96d57c
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -0,0 +1,3910 @@
1/*
2 * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
3 *
4 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) IBM Corporation, 2008
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/dma-mapping.h>
27#include <linux/dmapool.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/kthread.h>
31#include <linux/of.h>
32#include <linux/stringify.h>
33#include <asm/firmware.h>
34#include <asm/irq.h>
35#include <asm/vio.h>
36#include <scsi/scsi.h>
37#include <scsi/scsi_cmnd.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_tcq.h>
41#include <scsi/scsi_transport_fc.h>
42#include "ibmvfc.h"
43
44static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
45static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
46static unsigned int max_lun = IBMVFC_MAX_LUN;
47static unsigned int max_targets = IBMVFC_MAX_TARGETS;
48static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
49static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
50static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
51static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
52static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
53static LIST_HEAD(ibmvfc_head);
54static DEFINE_SPINLOCK(ibmvfc_driver_lock);
55static struct scsi_transport_template *ibmvfc_transport_template;
56
57MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
58MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
59MODULE_LICENSE("GPL");
60MODULE_VERSION(IBMVFC_DRIVER_VERSION);
61
62module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
63MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
64 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
65module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
66MODULE_PARM_DESC(default_timeout,
67 "Default timeout in seconds for initialization and EH commands. "
68 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
69module_param_named(max_requests, max_requests, uint, S_IRUGO);
70MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
71 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
72module_param_named(max_lun, max_lun, uint, S_IRUGO);
73MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
74 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
75module_param_named(max_targets, max_targets, uint, S_IRUGO);
76MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
77 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
78module_param_named(disc_threads, disc_threads, uint, S_IRUGO | S_IWUSR);
79MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
80 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
81module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
82MODULE_PARM_DESC(debug, "Enable driver debug information. "
83 "[Default=" __stringify(IBMVFC_DEBUG) "]");
84module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
85MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
86 "transport should insulate the loss of a remote port. Once this "
87 "value is exceeded, the scsi target is removed. "
88 "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
89module_param_named(log_level, log_level, uint, 0);
90MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver. "
91 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
92
93static const struct {
94 u16 status;
95 u16 error;
96 u8 result;
97 u8 retry;
98 int log;
99 char *name;
100} cmd_status [] = {
101 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
102 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
103 { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
104 { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" },
105 { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
106 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
107 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
108 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
109 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
110 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
111 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
112 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
113 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
114 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
115
116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
117 { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
118 { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" },
119 { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" },
120 { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
121 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
122 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
123 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
124 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
125
126 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
127 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
128 { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
129 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
130 { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
131 { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
132 { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
133 { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
134 { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
135 { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
136 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
137
138 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
139};
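/*
 * Reading cmd_status[]: each row maps a (status, error) pair reported by the
 * VIOS to a SCSI midlayer host byte, plus a retry flag, a log flag, and a
 * description. For example, the row
 *
 *	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" }
 *
 * completes the command with host byte DID_NO_CONNECT, marks the error as
 * retryable, and logs it.
 */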
140
141static void ibmvfc_npiv_login(struct ibmvfc_host *);
142static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
143static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
144static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
145
146static const char *unknown_error = "unknown error";
147
148#ifdef CONFIG_SCSI_IBMVFC_TRACE
149/**
150 * ibmvfc_trc_start - Log a start trace entry
151 * @evt: ibmvfc event struct
152 *
153 **/
154static void ibmvfc_trc_start(struct ibmvfc_event *evt)
155{
156 struct ibmvfc_host *vhost = evt->vhost;
157 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
158 struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
159 struct ibmvfc_trace_entry *entry;
160
161 entry = &vhost->trace[vhost->trace_index++];
162 entry->evt = evt;
163 entry->time = jiffies;
164 entry->fmt = evt->crq.format;
165 entry->type = IBMVFC_TRC_START;
166
167 switch (entry->fmt) {
168 case IBMVFC_CMD_FORMAT:
169 entry->op_code = vfc_cmd->iu.cdb[0];
170 entry->scsi_id = vfc_cmd->tgt_scsi_id;
171 entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
172 entry->tmf_flags = vfc_cmd->iu.tmf_flags;
173 entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
174 break;
175 case IBMVFC_MAD_FORMAT:
176 entry->op_code = mad->opcode;
177 break;
178 default:
179 break;
180 };
181}
182
183/**
184 * ibmvfc_trc_end - Log an end trace entry
185 * @evt: ibmvfc event struct
186 *
187 **/
188static void ibmvfc_trc_end(struct ibmvfc_event *evt)
189{
190 struct ibmvfc_host *vhost = evt->vhost;
191 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
192 struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
193 struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
194
195 entry->evt = evt;
196 entry->time = jiffies;
197 entry->fmt = evt->crq.format;
198 entry->type = IBMVFC_TRC_END;
199
200 switch (entry->fmt) {
201 case IBMVFC_CMD_FORMAT:
202 entry->op_code = vfc_cmd->iu.cdb[0];
203 entry->scsi_id = vfc_cmd->tgt_scsi_id;
204 entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
205 entry->tmf_flags = vfc_cmd->iu.tmf_flags;
206 entry->u.end.status = vfc_cmd->status;
207 entry->u.end.error = vfc_cmd->error;
208 entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
209 entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
210 entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
211 break;
212 case IBMVFC_MAD_FORMAT:
213 entry->op_code = mad->opcode;
214 entry->u.end.status = mad->status;
215 break;
216 default:
217 break;
218
219 };
220}
221
222#else
223#define ibmvfc_trc_start(evt) do { } while (0)
224#define ibmvfc_trc_end(evt) do { } while (0)
225#endif
226
227/**
228 * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
229 * @status: status / error class
230 * @error: error
231 *
232 * Return value:
233 * index into cmd_status / -EINVAL on failure
234 **/
235static int ibmvfc_get_err_index(u16 status, u16 error)
236{
237 int i;
238
239 for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
240 if ((cmd_status[i].status & status) == cmd_status[i].status &&
241 cmd_status[i].error == error)
242 return i;
243
244 return -EINVAL;
245}
246
247/**
248 * ibmvfc_get_cmd_error - Find the error description for the fcp response
249 * @status: status / error class
250 * @error: error
251 *
252 * Return value:
253 * error description string
254 **/
255static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
256{
257 int rc = ibmvfc_get_err_index(status, error);
258 if (rc >= 0)
259 return cmd_status[rc].name;
260 return unknown_error;
261}
262
263/**
264 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
265 * @vfc_cmd: ibmvfc command struct
266 *
267 * Return value:
268 * SCSI result value to return for completed command
269 **/
270static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
271{
272 int err;
273 struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
274 int fc_rsp_len = rsp->fcp_rsp_len;
275
276 if ((rsp->flags & FCP_RSP_LEN_VALID) &&
277 ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
278 rsp->data.info.rsp_code))
279 return DID_ERROR << 16;
280
281 if (!vfc_cmd->status) {
282 if (rsp->flags & FCP_RESID_OVER)
283 return rsp->scsi_status | (DID_ERROR << 16);
284 else
285 return rsp->scsi_status | (DID_OK << 16);
286 }
287
288 err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
289 if (err >= 0)
290 return rsp->scsi_status | (cmd_status[err].result << 16);
291 return rsp->scsi_status | (DID_ERROR << 16);
292}
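/*
 * Worked example for the lookup above: a fabric-mapped command timeout
 * (IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT) resolves to DID_TIME_OUT in
 * cmd_status[], so with a SCSI status byte of 0 the routine returns
 *
 *	0x00 | (DID_TIME_OUT << 16) == 0x00030000
 *
 * since DID_TIME_OUT is 0x03 in <scsi/scsi.h>.
 */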
293
294/**
295 * ibmvfc_retry_cmd - Determine if error status is retryable
296 * @status: status / error class
297 * @error: error
298 *
299 * Return value:
300 * 1 if error should be retried / 0 if it should not
301 **/
302static int ibmvfc_retry_cmd(u16 status, u16 error)
303{
304 int rc = ibmvfc_get_err_index(status, error);
305
306 if (rc >= 0)
307 return cmd_status[rc].retry;
308 return 1;
309}
310
311static const char *unknown_fc_explain = "unknown fc explain";
312
313static const struct {
314 u16 fc_explain;
315 char *name;
316} ls_explain [] = {
317 { 0x00, "no additional explanation" },
318 { 0x01, "service parameter error - options" },
319 { 0x03, "service parameter error - initiator control" },
320 { 0x05, "service parameter error - recipient control" },
321 { 0x07, "service parameter error - received data field size" },
322 { 0x09, "service parameter error - concurrent seq" },
323 { 0x0B, "service parameter error - credit" },
324 { 0x0D, "invalid N_Port/F_Port_Name" },
325 { 0x0E, "invalid node/Fabric Name" },
326 { 0x0F, "invalid common service parameters" },
327 { 0x11, "invalid association header" },
328 { 0x13, "association header required" },
329 { 0x15, "invalid originator S_ID" },
330 { 0x17, "invalid OX_ID-RX-ID combination" },
331 { 0x19, "command (request) already in progress" },
332 { 0x1E, "N_Port Login requested" },
333 { 0x1F, "Invalid N_Port_ID" },
334};
335
336static const struct {
337 u16 fc_explain;
338 char *name;
339} gs_explain [] = {
340 { 0x00, "no additional explanation" },
341 { 0x01, "port identifier not registered" },
342 { 0x02, "port name not registered" },
343 { 0x03, "node name not registered" },
344 { 0x04, "class of service not registered" },
345 { 0x06, "initial process associator not registered" },
346 { 0x07, "FC-4 TYPEs not registered" },
347 { 0x08, "symbolic port name not registered" },
348 { 0x09, "symbolic node name not registered" },
349 { 0x0A, "port type not registered" },
350 { 0xF0, "authorization exception" },
351 { 0xF1, "authentication exception" },
352 { 0xF2, "data base full" },
353 { 0xF3, "data base empty" },
354 { 0xF4, "processing request" },
355 { 0xF5, "unable to verify connection" },
356 { 0xF6, "devices not in a common zone" },
357};
358
359/**
360 * ibmvfc_get_ls_explain - Return the FC Explain description text
361 * @status: FC Explain status
362 *
363 * Returns:
364 * error string
365 **/
366static const char *ibmvfc_get_ls_explain(u16 status)
367{
368 int i;
369
370 for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
371 if (ls_explain[i].fc_explain == status)
372 return ls_explain[i].name;
373
374 return unknown_fc_explain;
375}
376
377/**
378 * ibmvfc_get_gs_explain - Return the FC Explain description text
379 * @status: FC Explain status
380 *
381 * Returns:
382 * error string
383 **/
384static const char *ibmvfc_get_gs_explain(u16 status)
385{
386 int i;
387
388 for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
389 if (gs_explain[i].fc_explain == status)
390 return gs_explain[i].name;
391
392 return unknown_fc_explain;
393}
394
395static const struct {
396 enum ibmvfc_fc_type fc_type;
397 char *name;
398} fc_type [] = {
399 { IBMVFC_FABRIC_REJECT, "fabric reject" },
400 { IBMVFC_PORT_REJECT, "port reject" },
401 { IBMVFC_LS_REJECT, "ELS reject" },
402 { IBMVFC_FABRIC_BUSY, "fabric busy" },
403 { IBMVFC_PORT_BUSY, "port busy" },
404 { IBMVFC_BASIC_REJECT, "basic reject" },
405};
406
407static const char *unknown_fc_type = "unknown fc type";
408
409/**
410 * ibmvfc_get_fc_type - Return the FC Type description text
411 * @status: FC Type error status
412 *
413 * Returns:
414 * error string
415 **/
416static const char *ibmvfc_get_fc_type(u16 status)
417{
418 int i;
419
420 for (i = 0; i < ARRAY_SIZE(fc_type); i++)
421 if (fc_type[i].fc_type == status)
422 return fc_type[i].name;
423
424 return unknown_fc_type;
425}
426
427/**
428 * ibmvfc_set_tgt_action - Set the next init action for the target
429 * @tgt: ibmvfc target struct
430 * @action: action to perform
431 *
432 **/
433static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
434 enum ibmvfc_target_action action)
435{
436 switch (tgt->action) {
437 case IBMVFC_TGT_ACTION_DEL_RPORT:
438 break;
439 default:
440 tgt->action = action;
441 break;
442 }
443}
444
445/**
446 * ibmvfc_set_host_state - Set the state for the host
447 * @vhost: ibmvfc host struct
448 * @state: state to set host to
449 *
450 * Returns:
451 * 0 if state changed / non-zero if not changed
452 **/
453static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
454 enum ibmvfc_host_state state)
455{
456 int rc = 0;
457
458 switch (vhost->state) {
459 case IBMVFC_HOST_OFFLINE:
460 rc = -EINVAL;
461 break;
462 default:
463 vhost->state = state;
464 break;
465 };
466
467 return rc;
468}
469
470/**
471 * ibmvfc_set_host_action - Set the next init action for the host
472 * @vhost: ibmvfc host struct
473 * @action: action to perform
474 *
475 **/
476static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
477 enum ibmvfc_host_action action)
478{
479 switch (action) {
480 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
481 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
482 vhost->action = action;
483 break;
484 case IBMVFC_HOST_ACTION_INIT_WAIT:
485 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
486 vhost->action = action;
487 break;
488 case IBMVFC_HOST_ACTION_QUERY:
489 switch (vhost->action) {
490 case IBMVFC_HOST_ACTION_INIT_WAIT:
491 case IBMVFC_HOST_ACTION_NONE:
492 case IBMVFC_HOST_ACTION_TGT_ADD:
493 vhost->action = action;
494 break;
495 default:
496 break;
497 };
498 break;
499 case IBMVFC_HOST_ACTION_TGT_INIT:
500 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
501 vhost->action = action;
502 break;
503 case IBMVFC_HOST_ACTION_INIT:
504 case IBMVFC_HOST_ACTION_TGT_DEL:
505 case IBMVFC_HOST_ACTION_QUERY_TGTS:
506 case IBMVFC_HOST_ACTION_TGT_ADD:
507 case IBMVFC_HOST_ACTION_NONE:
508 default:
509 vhost->action = action;
510 break;
511 };
512}
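/*
 * Summary of the gating above: QUERY is only accepted when the current
 * action is INIT_WAIT, NONE, or TGT_ADD; ALLOC_TGTS only from INIT_WAIT;
 * TGT_INIT only from ALLOC_TGTS; INIT_WAIT only from INIT. Every other
 * action is taken unconditionally.
 */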
513
514/**
515 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
516 * @vhost: ibmvfc host struct
517 *
518 * Return value:
519 * nothing
520 **/
521static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
522{
523 if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
524 scsi_block_requests(vhost->host);
525 ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING);
526 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
527 } else
528 vhost->reinit = 1;
529
530 wake_up(&vhost->work_wait_q);
531}
532
533/**
534 * ibmvfc_link_down - Handle a link down event from the adapter
535 * @vhost: ibmvfc host struct
536 * @state: ibmvfc host state to enter
537 *
538 **/
539static void ibmvfc_link_down(struct ibmvfc_host *vhost,
540 enum ibmvfc_host_state state)
541{
542 struct ibmvfc_target *tgt;
543
544 ENTER;
545 scsi_block_requests(vhost->host);
546 list_for_each_entry(tgt, &vhost->targets, queue)
547 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
548 ibmvfc_set_host_state(vhost, state);
549 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
550 vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
551 wake_up(&vhost->work_wait_q);
552 LEAVE;
553}
554
555/**
556 * ibmvfc_init_host - Start host initialization
557 * @vhost: ibmvfc host struct
558 *
559 * Return value:
560 * nothing
561 **/
562static void ibmvfc_init_host(struct ibmvfc_host *vhost)
563{
564 struct ibmvfc_target *tgt;
565
566 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
567 if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
568 dev_err(vhost->dev,
569 "Host initialization retries exceeded. Taking adapter offline\n");
570 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
571 return;
572 }
573 }
574
575 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
576 list_for_each_entry(tgt, &vhost->targets, queue)
577 tgt->need_login = 1;
578 scsi_block_requests(vhost->host);
579 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
580 vhost->job_step = ibmvfc_npiv_login;
581 wake_up(&vhost->work_wait_q);
582 }
583}
584
585/**
586 * ibmvfc_send_crq - Send a CRQ
587 * @vhost: ibmvfc host struct
588 * @word1: the first 64 bits of the data
589 * @word2: the second 64 bits of the data
590 *
591 * Return value:
592 * 0 on success / other on failure
593 **/
594static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
595{
596 struct vio_dev *vdev = to_vio_dev(vhost->dev);
597 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
598}
599
600/**
601 * ibmvfc_send_crq_init - Send a CRQ init message
602 * @vhost: ibmvfc host struct
603 *
604 * Return value:
605 * 0 on success / other on failure
606 **/
607static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
608{
609 ibmvfc_dbg(vhost, "Sending CRQ init\n");
610 return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
611}
612
613/**
614 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
615 * @vhost: ibmvfc host struct
616 *
617 * Return value:
618 * 0 on success / other on failure
619 **/
620static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
621{
622 ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
623 return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
624}
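/*
 * A minimal sketch of the encoding used by the two helpers above, assuming
 * the standard PAPR CRQ convention: the high byte of the first word (0xC0)
 * marks a valid initialization message and the next byte selects its type:
 *
 *	0xC001000000000000ULL	-> initialization request
 *	0xC002000000000000ULL	-> initialization complete
 */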
625
626/**
627 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
628 * @vhost: ibmvfc host struct
629 *
630 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
631 * the crq with the hypervisor.
632 **/
633static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
634{
635 long rc;
636 struct vio_dev *vdev = to_vio_dev(vhost->dev);
637 struct ibmvfc_crq_queue *crq = &vhost->crq;
638
639 ibmvfc_dbg(vhost, "Releasing CRQ\n");
640 free_irq(vdev->irq, vhost);
641 do {
642 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
643 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
644
645 vhost->state = IBMVFC_NO_CRQ;
646 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
647 free_page((unsigned long)crq->msgs);
648}
649
650/**
651 * ibmvfc_reenable_crq_queue - reenables the CRQ
652 * @vhost: ibmvfc host struct
653 *
654 * Return value:
655 * 0 on success / other on failure
656 **/
657static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
658{
659 int rc;
660 struct vio_dev *vdev = to_vio_dev(vhost->dev);
661
662 /* Re-enable the CRQ */
663 do {
664 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
665 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
666
667 if (rc)
668 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
669
670 return rc;
671}
672
673/**
674 * ibmvfc_reset_crq - resets a crq after a failure
675 * @vhost: ibmvfc host struct
676 *
677 * Return value:
678 * 0 on success / other on failure
679 **/
680static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
681{
682 int rc;
683 struct vio_dev *vdev = to_vio_dev(vhost->dev);
684 struct ibmvfc_crq_queue *crq = &vhost->crq;
685
686 /* Close the CRQ */
687 do {
688 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
689 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
690
691 vhost->state = IBMVFC_NO_CRQ;
692 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
693
694 /* Clean out the queue */
695 memset(crq->msgs, 0, PAGE_SIZE);
696 crq->cur = 0;
697
698 /* And re-open it again */
699 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
700 crq->msg_token, PAGE_SIZE);
701
702 if (rc == H_CLOSED)
703 /* Adapter is good, but other end is not ready */
704 dev_warn(vhost->dev, "Partner adapter not ready\n");
705 else if (rc != 0)
706 dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
707
708 return rc;
709}
710
711/**
712 * ibmvfc_valid_event - Determines if event is valid.
713 * @pool: event_pool that contains the event
714 * @evt: ibmvfc event to be checked for validity
715 *
716 * Return value:
717 * 1 if event is valid / 0 if event is not valid
718 **/
719static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
720 struct ibmvfc_event *evt)
721{
722 int index = evt - pool->events;
723 if (index < 0 || index >= pool->size) /* outside of bounds */
724 return 0;
725 if (evt != pool->events + index) /* unaligned */
726 return 0;
727 return 1;
728}
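/*
 * A minimal illustration of the checks above: if pool->events were 0x1000
 * and sizeof(struct ibmvfc_event) were 0x100, a corrupted pointer 0x1080
 * would, in practice, truncate to index 0 on pointer subtraction; since
 * pool->events + 0 (0x1000) != 0x1080, the event is rejected as unaligned.
 */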
729
730/**
731 * ibmvfc_free_event - Free the specified event
732 * @evt: ibmvfc_event to be freed
733 *
734 **/
735static void ibmvfc_free_event(struct ibmvfc_event *evt)
736{
737 struct ibmvfc_host *vhost = evt->vhost;
738 struct ibmvfc_event_pool *pool = &vhost->pool;
739
740 BUG_ON(!ibmvfc_valid_event(pool, evt));
741 BUG_ON(atomic_inc_return(&evt->free) != 1);
742 list_add_tail(&evt->queue, &vhost->free);
743}
744
745/**
746 * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
747 * @evt: ibmvfc event struct
748 *
749 * This function does not set up any error status; that must be done
750 * before this function gets called.
751 **/
752static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
753{
754 struct scsi_cmnd *cmnd = evt->cmnd;
755
756 if (cmnd) {
757 scsi_dma_unmap(cmnd);
758 cmnd->scsi_done(cmnd);
759 }
760
761 ibmvfc_free_event(evt);
762}
763
764/**
765 * ibmvfc_fail_request - Fail request with specified error code
766 * @evt: ibmvfc event struct
767 * @error_code: error code to fail request with
768 *
769 * Return value:
770 * none
771 **/
772static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
773{
774 if (evt->cmnd) {
775 evt->cmnd->result = (error_code << 16);
776 evt->done = ibmvfc_scsi_eh_done;
777 } else
778 evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;
779
780 list_del(&evt->queue);
781 del_timer(&evt->timer);
782 ibmvfc_trc_end(evt);
783 evt->done(evt);
784}
785
786/**
787 * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
788 * @vhost: ibmvfc host struct
789 * @error_code: error code to fail requests with
790 *
791 * Return value:
792 * none
793 **/
794static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
795{
796 struct ibmvfc_event *evt, *pos;
797
798 ibmvfc_dbg(vhost, "Purging all requests\n");
799 list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
800 ibmvfc_fail_request(evt, error_code);
801}
802
803/**
804 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
805 * @vhost: struct ibmvfc host to reset
806 **/
807static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
808{
809 int rc;
810
811 scsi_block_requests(vhost->host);
812 ibmvfc_purge_requests(vhost, DID_ERROR);
813 if ((rc = ibmvfc_reset_crq(vhost)) ||
814 (rc = ibmvfc_send_crq_init(vhost)) ||
815 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
816 dev_err(vhost->dev, "Error after reset rc=%d\n", rc);
817 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
818 } else
819 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
820}
821
822/**
823 * ibmvfc_reset_host - Reset the connection to the server
824 * @vhost: struct ibmvfc host to reset
825 **/
826static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
827{
828 unsigned long flags;
829
830 spin_lock_irqsave(vhost->host->host_lock, flags);
831 __ibmvfc_reset_host(vhost);
832 spin_unlock_irqrestore(vhost->host->host_lock, flags);
833}
834
835/**
836 * ibmvfc_retry_host_init - Retry host initialization if allowed
837 * @vhost: ibmvfc host struct
838 *
839 **/
840static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
841{
842 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
843 if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
844 dev_err(vhost->dev,
845 "Host initialization retries exceeded. Taking adapter offline\n");
846 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
847 } else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
848 __ibmvfc_reset_host(vhost);
849 else
850 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
851 }
852
853 wake_up(&vhost->work_wait_q);
854}
855
856/**
857 * __ibmvfc_find_target - Find the specified scsi_target (no locking)
858 * @starget: scsi target struct
859 *
860 * Return value:
861 * ibmvfc_target struct / NULL if not found
862 **/
863static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget)
864{
865 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
866 struct ibmvfc_host *vhost = shost_priv(shost);
867 struct ibmvfc_target *tgt;
868
869 list_for_each_entry(tgt, &vhost->targets, queue)
870 if (tgt->target_id == starget->id)
871 return tgt;
872 return NULL;
873}
874
875/**
876 * ibmvfc_find_target - Find the specified scsi_target
877 * @starget: scsi target struct
878 *
879 * Return value:
880 * ibmvfc_target struct / NULL if not found
881 **/
882static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget)
883{
884 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
885 struct ibmvfc_target *tgt;
886 unsigned long flags;
887
888 spin_lock_irqsave(shost->host_lock, flags);
889 tgt = __ibmvfc_find_target(starget);
890 spin_unlock_irqrestore(shost->host_lock, flags);
891 return tgt;
892}
893
894/**
895 * ibmvfc_get_host_speed - Get host port speed
896 * @shost: scsi host struct
897 *
898 * Return value:
899 * none
900 **/
901static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
902{
903 struct ibmvfc_host *vhost = shost_priv(shost);
904 unsigned long flags;
905
906 spin_lock_irqsave(shost->host_lock, flags);
907 if (vhost->state == IBMVFC_ACTIVE) {
908 switch (vhost->login_buf->resp.link_speed / 100) {
909 case 1:
910 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
911 break;
912 case 2:
913 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
914 break;
915 case 4:
916 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
917 break;
918 case 8:
919 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
920 break;
921 case 10:
922 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
923 break;
924 case 16:
925 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
926 break;
927 default:
928 ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n",
929 vhost->login_buf->resp.link_speed / 100);
930 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
931 break;
932 }
933 } else
934 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
935 spin_unlock_irqrestore(shost->host_lock, flags);
936}
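/*
 * The switch above derives Gbit/s as link_speed / 100, so a reported
 * login_buf->resp.link_speed of 400 selects FC_PORTSPEED_4GBIT; any value
 * that does not map to a defined port speed is logged and reported as
 * FC_PORTSPEED_UNKNOWN.
 */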
937
938/**
939 * ibmvfc_get_host_port_state - Get host port state
940 * @shost: scsi host struct
941 *
942 * Return value:
943 * none
944 **/
945static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
946{
947 struct ibmvfc_host *vhost = shost_priv(shost);
948 unsigned long flags;
949
950 spin_lock_irqsave(shost->host_lock, flags);
951 switch (vhost->state) {
952 case IBMVFC_INITIALIZING:
953 case IBMVFC_ACTIVE:
954 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
955 break;
956 case IBMVFC_LINK_DOWN:
957 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
958 break;
959 case IBMVFC_LINK_DEAD:
960 case IBMVFC_HOST_OFFLINE:
961 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
962 break;
963 case IBMVFC_HALTED:
964 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
965 break;
966 default:
967 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
968 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
969 break;
970 }
971 spin_unlock_irqrestore(shost->host_lock, flags);
972}
973
974/**
975 * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
976 * @rport: rport struct
977 * @timeout: timeout value
978 *
979 * Return value:
980 * none
981 **/
982static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
983{
984 if (timeout)
985 rport->dev_loss_tmo = timeout;
986 else
987 rport->dev_loss_tmo = 1;
988}
989
990/**
991 * ibmvfc_get_starget_node_name - Get SCSI target's node name
992 * @starget: scsi target struct
993 *
994 * Return value:
995 * none
996 **/
997static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
998{
999 struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
1000 fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
1001}
1002
1003/**
1004 * ibmvfc_get_starget_port_name - Get SCSI target's port name
1005 * @starget: scsi target struct
1006 *
1007 * Return value:
1008 * none
1009 **/
1010static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1011{
1012 struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
1013 fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1014}
1015
1016/**
1017 * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1018 * @starget: scsi target struct
1019 *
1020 * Return value:
1021 * none
1022 **/
1023static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1024{
1025 struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
1026 fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1027}
1028
1029/**
1030 * ibmvfc_wait_while_resetting - Wait while the host resets
1031 * @vhost: ibmvfc host struct
1032 *
1033 * Return value:
1034 * 0 on success / other on failure
1035 **/
1036static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1037{
1038 long timeout = wait_event_timeout(vhost->init_wait_q,
1039 (vhost->state == IBMVFC_ACTIVE ||
1040 vhost->state == IBMVFC_HOST_OFFLINE ||
1041 vhost->state == IBMVFC_LINK_DEAD),
1042 (init_timeout * HZ));
1043
1044 return timeout ? 0 : -EIO;
1045}
1046
1047/**
1048 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1049 * @shost: scsi host struct
1050 *
1051 * Return value:
1052 * 0 on success / other on failure
1053 **/
1054static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1055{
1056 struct ibmvfc_host *vhost = shost_priv(shost);
1057
1058 dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1059 ibmvfc_reset_host(vhost);
1060 return ibmvfc_wait_while_resetting(vhost);
1061}
1062
1063/**
1064 * ibmvfc_gather_partition_info - Gather info about the LPAR
1065 *
1066 * Return value:
1067 * none
1068 **/
1069static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1070{
1071 struct device_node *rootdn;
1072 const char *name;
1073 const unsigned int *num;
1074
1075 rootdn = of_find_node_by_path("/");
1076 if (!rootdn)
1077 return;
1078
1079 name = of_get_property(rootdn, "ibm,partition-name", NULL);
1080 if (name)
1081 strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1082 num = of_get_property(rootdn, "ibm,partition-no", NULL);
1083 if (num)
1084 vhost->partition_number = *num;
1085 of_node_put(rootdn);
1086}
1087
1088/**
1089 * ibmvfc_set_login_info - Setup info for NPIV login
1090 * @vhost: ibmvfc host struct
1091 *
1092 * Return value:
1093 * none
1094 **/
1095static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1096{
1097 struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1098 struct device_node *of_node = vhost->dev->archdata.of_node;
1099 const char *location;
1100
1101 memset(login_info, 0, sizeof(*login_info));
1102
1103 login_info->ostype = IBMVFC_OS_LINUX;
1104 login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
1105 login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
1106 login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
1107 login_info->partition_num = vhost->partition_number;
1108 login_info->vfc_frame_version = 1;
1109 login_info->fcp_version = 3;
1110 if (vhost->client_migrated)
1111 login_info->flags = IBMVFC_CLIENT_MIGRATED;
1112
1113 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1114 login_info->capabilities = IBMVFC_CAN_MIGRATE;
1115 login_info->async.va = vhost->async_crq.msg_token;
1116 login_info->async.len = vhost->async_crq.size;
1117 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1118 strncpy(login_info->device_name,
1119 vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
1120
1121 location = of_get_property(of_node, "ibm,loc-code", NULL);
1122 location = location ? location : vhost->dev->bus_id;
1123 strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1124}
1125
1126/**
1127 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
1128 * @vhost: ibmvfc host who owns the event pool
1129 *
1130 * Returns zero on success.
1131 **/
1132static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
1133{
1134 int i;
1135 struct ibmvfc_event_pool *pool = &vhost->pool;
1136
1137 ENTER;
1138 pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1139 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
1140 if (!pool->events)
1141 return -ENOMEM;
1142
1143 pool->iu_storage = dma_alloc_coherent(vhost->dev,
1144 pool->size * sizeof(*pool->iu_storage),
1145 &pool->iu_token, 0);
1146
1147 if (!pool->iu_storage) {
1148 kfree(pool->events);
1149 return -ENOMEM;
1150 }
1151
1152 for (i = 0; i < pool->size; ++i) {
1153 struct ibmvfc_event *evt = &pool->events[i];
1154 atomic_set(&evt->free, 1);
1155 evt->crq.valid = 0x80;
1156 evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
1157 evt->xfer_iu = pool->iu_storage + i;
1158 evt->vhost = vhost;
1159 evt->ext_list = NULL;
1160 list_add_tail(&evt->queue, &vhost->free);
1161 }
1162
1163 LEAVE;
1164 return 0;
1165}
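/*
 * Layout note for the pool initialized above: event i is permanently tied
 * to IU slot i in the coherent buffer, so its CRQ entry can carry
 *
 *	evt->crq.ioba = pool->iu_token + i * sizeof(*evt->xfer_iu)
 *
 * and no per-command DMA mapping is needed for the IU itself.
 */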
1166
1167/**
1168 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
1169 * @vhost: ibmvfc host who owns the event pool
1170 *
1171 **/
1172static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
1173{
1174 int i;
1175 struct ibmvfc_event_pool *pool = &vhost->pool;
1176
1177 ENTER;
1178 for (i = 0; i < pool->size; ++i) {
1179 list_del(&pool->events[i].queue);
1180 BUG_ON(atomic_read(&pool->events[i].free) != 1);
1181 if (pool->events[i].ext_list)
1182 dma_pool_free(vhost->sg_pool,
1183 pool->events[i].ext_list,
1184 pool->events[i].ext_list_token);
1185 }
1186
1187 kfree(pool->events);
1188 dma_free_coherent(vhost->dev,
1189 pool->size * sizeof(*pool->iu_storage),
1190 pool->iu_storage, pool->iu_token);
1191 LEAVE;
1192}
1193
1194/**
1195 * ibmvfc_get_event - Gets the next free event in pool
1196 * @vhost: ibmvfc host struct
1197 *
1198 * Returns a free event from the pool.
1199 **/
1200static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
1201{
1202 struct ibmvfc_event *evt;
1203
1204 BUG_ON(list_empty(&vhost->free));
1205 evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
1206 atomic_set(&evt->free, 0);
1207 list_del(&evt->queue);
1208 return evt;
1209}
1210
1211/**
1212 * ibmvfc_init_event - Initialize fields in an event struct that are always
1213 * required.
1214 * @evt: The event
1215 * @done: Routine to call when the event is responded to
1216 * @format: SRP or MAD format
1217 **/
1218static void ibmvfc_init_event(struct ibmvfc_event *evt,
1219 void (*done) (struct ibmvfc_event *), u8 format)
1220{
1221 evt->cmnd = NULL;
1222 evt->sync_iu = NULL;
1223 evt->crq.format = format;
1224 evt->done = done;
1225}
1226
1227/**
1228 * ibmvfc_map_sg_list - Initialize scatterlist
1229 * @scmd: scsi command struct
1230 * @nseg: number of scatterlist segments
1231 * @md: memory descriptor list to initialize
1232 **/
1233static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1234 struct srp_direct_buf *md)
1235{
1236 int i;
1237 struct scatterlist *sg;
1238
1239 scsi_for_each_sg(scmd, sg, nseg, i) {
1240 md[i].va = sg_dma_address(sg);
1241 md[i].len = sg_dma_len(sg);
1242 md[i].key = 0;
1243 }
1244}
1245
1246/**
1247 * ibmvfc_map_sg_data - Maps DMA for a scatterlist and initializes descriptor fields
1248 * @scmd: Scsi_Cmnd with the scatterlist
1249 * @evt: ibmvfc event struct
1250 * @vfc_cmd: vfc_cmd that contains the memory descriptor
1251 * @dev: device for which to map dma memory
1252 *
1253 * Returns:
1254 * 0 on success / non-zero on failure
1255 **/
1256static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1257 struct ibmvfc_event *evt,
1258 struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1259{
1260
1261 int sg_mapped;
1262 struct srp_direct_buf *data = &vfc_cmd->ioba;
1263 struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1264
1265 sg_mapped = scsi_dma_map(scmd);
1266 if (!sg_mapped) {
1267 vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
1268 return 0;
1269 } else if (unlikely(sg_mapped < 0)) {
1270 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1271 scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1272 return sg_mapped;
1273 }
1274
1275 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1276 vfc_cmd->flags |= IBMVFC_WRITE;
1277 vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
1278 } else {
1279 vfc_cmd->flags |= IBMVFC_READ;
1280 vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
1281 }
1282
1283 if (sg_mapped == 1) {
1284 ibmvfc_map_sg_list(scmd, sg_mapped, data);
1285 return 0;
1286 }
1287
1288 vfc_cmd->flags |= IBMVFC_SCATTERLIST;
1289
1290 if (!evt->ext_list) {
1291 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1292 &evt->ext_list_token);
1293
1294 if (!evt->ext_list) {
1295 scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1296 return -ENOMEM;
1297 }
1298 }
1299
1300 ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1301
1302 data->va = evt->ext_list_token;
1303 data->len = sg_mapped * sizeof(struct srp_direct_buf);
1304 data->key = 0;
1305 return 0;
1306}
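/*
 * Sketch of the three descriptor layouts chosen above:
 *
 *	nseg == 0: IBMVFC_NO_MEM_DESC, no data buffer is described
 *	nseg == 1: one srp_direct_buf embedded in the command (vfc_cmd->ioba)
 *	nseg  > 1: IBMVFC_SCATTERLIST, vfc_cmd->ioba points to an external
 *	           array of srp_direct_buf entries from vhost->sg_pool
 */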
1307
1308/**
1309 * ibmvfc_timeout - Internal command timeout handler
1310 * @evt: struct ibmvfc_event that timed out
1311 *
1312 * Called when an internally generated command times out
1313 **/
1314static void ibmvfc_timeout(struct ibmvfc_event *evt)
1315{
1316 struct ibmvfc_host *vhost = evt->vhost;
1317 dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1318 ibmvfc_reset_host(vhost);
1319}
1320
1321/**
1322 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1323 * @evt: event to be sent
1324 * @vhost: ibmvfc host struct
1325 * @timeout: timeout in seconds - 0 means do not time command
1326 *
1327 * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1328 **/
1329static int ibmvfc_send_event(struct ibmvfc_event *evt,
1330 struct ibmvfc_host *vhost, unsigned long timeout)
1331{
1332 u64 *crq_as_u64 = (u64 *) &evt->crq;
1333 int rc;
1334
1335 /* Copy the IU into the transfer area */
1336 *evt->xfer_iu = evt->iu;
1337 if (evt->crq.format == IBMVFC_CMD_FORMAT)
1338 evt->xfer_iu->cmd.tag = (u64)evt;
1339 else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1340 evt->xfer_iu->mad_common.tag = (u64)evt;
1341 else
1342 BUG();
1343
1344 list_add_tail(&evt->queue, &vhost->sent);
1345 init_timer(&evt->timer);
1346
1347 if (timeout) {
1348 evt->timer.data = (unsigned long) evt;
1349 evt->timer.expires = jiffies + (timeout * HZ);
1350 evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
1351 add_timer(&evt->timer);
1352 }
1353
1354 if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
1355 list_del(&evt->queue);
1356 del_timer(&evt->timer);
1357
1358 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1359 * Firmware will send a CRQ with a transport event (0xFF) to
1360 * tell this client what has happened to the transport. This
1361 * will be handled in ibmvfc_handle_crq()
1362 */
1363 if (rc == H_CLOSED) {
1364 if (printk_ratelimit())
1365 dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1366 if (evt->cmnd)
1367 scsi_dma_unmap(evt->cmnd);
1368 ibmvfc_free_event(evt);
1369 return SCSI_MLQUEUE_HOST_BUSY;
1370 }
1371
1372 dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1373 if (evt->cmnd) {
1374 evt->cmnd->result = DID_ERROR << 16;
1375 evt->done = ibmvfc_scsi_eh_done;
1376 } else
1377 evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;
1378
1379 evt->done(evt);
1380 } else
1381 ibmvfc_trc_start(evt);
1382
1383 return 0;
1384}
1385
1386/**
1387 * ibmvfc_log_error - Log an error for the failed command if appropriate
1388 * @evt: ibmvfc event to log
1389 *
1390 **/
1391static void ibmvfc_log_error(struct ibmvfc_event *evt)
1392{
1393 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1394 struct ibmvfc_host *vhost = evt->vhost;
1395 struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1396 struct scsi_cmnd *cmnd = evt->cmnd;
1397 const char *err = unknown_error;
1398 int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
1399 int logerr = 0;
1400 int rsp_code = 0;
1401
1402 if (index >= 0) {
1403 logerr = cmd_status[index].log;
1404 err = cmd_status[index].name;
1405 }
1406
1407 if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL))
1408 return;
1409
1410 if (rsp->flags & FCP_RSP_LEN_VALID)
1411 rsp_code = rsp->data.info.rsp_code;
1412
1413 scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
1414 "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1415 cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
1416 rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1417}
1418
1419/**
1420 * ibmvfc_scsi_done - Handle responses from commands
1421 * @evt: ibmvfc event to be handled
1422 *
1423 * Used as a callback when sending scsi cmds.
1424 **/
1425static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1426{
1427 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1428 struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1429 struct scsi_cmnd *cmnd = evt->cmnd;
1430 int rsp_len = 0;
1431 int sense_len = rsp->fcp_sense_len;
1432
1433 if (cmnd) {
1434 if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
1435 scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
1436 else if (rsp->flags & FCP_RESID_UNDER)
1437 scsi_set_resid(cmnd, rsp->fcp_resid);
1438 else
1439 scsi_set_resid(cmnd, 0);
1440
1441 if (vfc_cmd->status) {
1442 cmnd->result = ibmvfc_get_err_result(vfc_cmd);
1443
1444 if (rsp->flags & FCP_RSP_LEN_VALID)
1445 rsp_len = rsp->fcp_rsp_len;
1446 if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1447 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1448 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len)
1449 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1450
1451 ibmvfc_log_error(evt);
1452 }
1453
1454 if (!cmnd->result &&
1455 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1456 cmnd->result = (DID_ERROR << 16);
1457
1458 scsi_dma_unmap(cmnd);
1459 cmnd->scsi_done(cmnd);
1460 }
1461
1462 ibmvfc_free_event(evt);
1463}
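/*
 * Note on the sense copy above: per FCP, any response info field (rsp_len
 * bytes) precedes the sense data in the payload, so sense bytes are read
 * from rsp->data.sense + rsp_len and sense_len is clamped so that
 * rsp_len + sense_len never exceeds SCSI_SENSE_BUFFERSIZE.
 */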
1464
1465/**
1466 * ibmvfc_host_chkready - Check if the host can accept commands
1467 * @vhost: struct ibmvfc host
1468 *
1469 * Returns:
1470 * 1 if host can accept command / 0 if not
1471 **/
1472static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1473{
1474 int result = 0;
1475
1476 switch (vhost->state) {
1477 case IBMVFC_LINK_DEAD:
1478 case IBMVFC_HOST_OFFLINE:
1479 result = DID_NO_CONNECT << 16;
1480 break;
1481 case IBMVFC_NO_CRQ:
1482 case IBMVFC_INITIALIZING:
1483 case IBMVFC_HALTED:
1484 case IBMVFC_LINK_DOWN:
1485 result = DID_REQUEUE << 16;
1486 break;
1487 case IBMVFC_ACTIVE:
1488 result = 0;
1489 break;
1490 };
1491
1492 return result;
1493}
1494
1495/**
1496 * ibmvfc_queuecommand - The queuecommand function of the scsi template
1497 * @cmnd: struct scsi_cmnd to be executed
1498 * @done: Callback function to be called when cmnd is completed
1499 *
1500 * Returns:
1501 * 0 on success / other on failure
1502 **/
1503static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
1504 void (*done) (struct scsi_cmnd *))
1505{
1506 struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
1507 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1508 struct ibmvfc_cmd *vfc_cmd;
1509 struct ibmvfc_event *evt;
1510 u8 tag[2];
1511 int rc;
1512
1513 if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1514 unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1515 cmnd->result = rc;
1516 done(cmnd);
1517 return 0;
1518 }
1519
1520 cmnd->result = (DID_OK << 16);
1521 evt = ibmvfc_get_event(vhost);
1522 ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1523 evt->cmnd = cmnd;
1524 cmnd->scsi_done = done;
1525 vfc_cmd = &evt->iu.cmd;
1526 memset(vfc_cmd, 0, sizeof(*vfc_cmd));
1527 vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1528 vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
1529 vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
1530 vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
1531 vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
1532 vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
1533 vfc_cmd->tgt_scsi_id = rport->port_id;
1534 if ((rport->supported_classes & FC_COS_CLASS3) &&
1535 (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3))
1536 vfc_cmd->flags = IBMVFC_CLASS_3_ERR;
1537 vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
1538 int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
1539 memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
1540
1541 if (scsi_populate_tag_msg(cmnd, tag)) {
1542 vfc_cmd->task_tag = tag[1];
1543 switch (tag[0]) {
1544 case MSG_SIMPLE_TAG:
1545 vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
1546 break;
1547 case MSG_HEAD_TAG:
1548 vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
1549 break;
1550 case MSG_ORDERED_TAG:
1551 vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
1552 break;
1553 };
1554 }
1555
1556 if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1557 return ibmvfc_send_event(evt, vhost, 0);
1558
1559 ibmvfc_free_event(evt);
1560 if (rc == -ENOMEM)
1561 return SCSI_MLQUEUE_HOST_BUSY;
1562
1563 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1564 scmd_printk(KERN_ERR, cmnd,
1565 "Failed to map DMA buffer for command. rc=%d\n", rc);
1566
1567 cmnd->result = DID_ERROR << 16;
1568 done(cmnd);
1569 return 0;
1570}
1571
1572/**
1573 * ibmvfc_sync_completion - Signal that a synchronous command has completed
1574 * @evt: ibmvfc event struct
1575 *
1576 **/
1577static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1578{
1579 /* copy the response back */
1580 if (evt->sync_iu)
1581 *evt->sync_iu = *evt->xfer_iu;
1582
1583 complete(&evt->comp);
1584}
1585
1586/**
1587 * ibmvfc_reset_device - Reset the device with the specified reset type
1588 * @sdev: scsi device to reset
1589 * @type: reset type
1590 * @desc: reset type description for log messages
1591 *
1592 * Returns:
1593 * 0 on success / other on failure
1594 **/
1595static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
1596{
1597 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1598 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1599 struct ibmvfc_cmd *tmf;
1600 struct ibmvfc_event *evt;
1601 union ibmvfc_iu rsp_iu;
1602 struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
1603 int rsp_rc = -EBUSY;
1604 unsigned long flags;
1605 int rsp_code = 0;
1606
1607 spin_lock_irqsave(vhost->host->host_lock, flags);
1608 if (vhost->state == IBMVFC_ACTIVE) {
1609 evt = ibmvfc_get_event(vhost);
1610 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
1611
1612 tmf = &evt->iu.cmd;
1613 memset(tmf, 0, sizeof(*tmf));
1614 tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1615 tmf->resp.len = sizeof(tmf->rsp);
1616 tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
1617 tmf->payload_len = sizeof(tmf->iu);
1618 tmf->resp_len = sizeof(tmf->rsp);
1619 tmf->cancel_key = (unsigned long)sdev->hostdata;
1620 tmf->tgt_scsi_id = rport->port_id;
1621 int_to_scsilun(sdev->lun, &tmf->iu.lun);
1622 tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
1623 tmf->iu.tmf_flags = type;
1624 evt->sync_iu = &rsp_iu;
1625
1626 init_completion(&evt->comp);
1627 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
1628 }
1629 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1630
1631 if (rsp_rc != 0) {
1632 sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
1633 desc, rsp_rc);
1634 return -EIO;
1635 }
1636
1637 sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
1638 wait_for_completion(&evt->comp);
1639
1640 if (rsp_iu.cmd.status) {
1641 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
1642 rsp_code = fc_rsp->data.info.rsp_code;
1643
1644 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
1645 "flags: %x fcp_rsp: %x, scsi_status: %x\n",
1646 desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
1647 rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
1648 fc_rsp->scsi_status);
1649 rsp_rc = -EIO;
1650 } else
1651 sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
1652
1653 spin_lock_irqsave(vhost->host->host_lock, flags);
1654 ibmvfc_free_event(evt);
1655 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1656 return rsp_rc;
1657}
1658
1659/**
1660 * ibmvfc_abort_task_set - Abort outstanding commands to the device
1661 * @sdev: scsi device to abort commands
1662 *
1663 * This sends an Abort Task Set to the VIOS for the specified device. This does
1664 * NOT send any cancel to the VIOS. That must be done separately.
1665 *
1666 * Returns:
1667 * 0 on success / other on failure
1668 **/
1669static int ibmvfc_abort_task_set(struct scsi_device *sdev)
1670{
1671 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1672 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1673 struct ibmvfc_cmd *tmf;
1674 struct ibmvfc_event *evt, *found_evt;
1675 union ibmvfc_iu rsp_iu;
1676 struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
1677 int rsp_rc = -EBUSY;
1678 unsigned long flags;
1679 int rsp_code = 0;
1680
1681 spin_lock_irqsave(vhost->host->host_lock, flags);
1682 found_evt = NULL;
1683 list_for_each_entry(evt, &vhost->sent, queue) {
1684 if (evt->cmnd && evt->cmnd->device == sdev) {
1685 found_evt = evt;
1686 break;
1687 }
1688 }
1689
1690 if (!found_evt) {
1691 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1692 sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
1693 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1694 return 0;
1695 }
1696
1697 if (vhost->state == IBMVFC_ACTIVE) {
1698 evt = ibmvfc_get_event(vhost);
1699 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
1700
1701 tmf = &evt->iu.cmd;
1702 memset(tmf, 0, sizeof(*tmf));
1703 tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1704 tmf->resp.len = sizeof(tmf->rsp);
1705 tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
1706 tmf->payload_len = sizeof(tmf->iu);
1707 tmf->resp_len = sizeof(tmf->rsp);
1708 tmf->cancel_key = (unsigned long)sdev->hostdata;
1709 tmf->tgt_scsi_id = rport->port_id;
1710 int_to_scsilun(sdev->lun, &tmf->iu.lun);
1711 tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
1712 tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
1713 evt->sync_iu = &rsp_iu;
1714
1715 init_completion(&evt->comp);
1716 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
1717 }
1718
1719 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1720
1721 if (rsp_rc != 0) {
1722 sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
1723 return -EIO;
1724 }
1725
1726 sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
1727 wait_for_completion(&evt->comp);
1728
1729 if (rsp_iu.cmd.status) {
1730 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
1731 rsp_code = fc_rsp->data.info.rsp_code;
1732
1733 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
1734 "flags: %x fcp_rsp: %x, scsi_status: %x\n",
1735 ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
1736 rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
1737 fc_rsp->scsi_status);
1738 rsp_rc = -EIO;
1739 } else
1740 sdev_printk(KERN_INFO, sdev, "Abort successful\n");
1741
1742 spin_lock_irqsave(vhost->host->host_lock, flags);
1743 ibmvfc_free_event(evt);
1744 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1745 return rsp_rc;
1746}
1747
1748/**
1749 * ibmvfc_cancel_all - Cancel all outstanding commands to the device
1750 * @sdev:	scsi device whose commands should be cancelled
1751 * @type: type of error recovery being performed
1752 *
1753 * This sends a cancel to the VIOS for the specified device. This does
1754 * NOT send any abort to the actual device. That must be done separately.
1755 *
1756 * Returns:
1757 * 0 on success / other on failure
1758 **/
1759static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
1760{
1761 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1762 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1763 struct ibmvfc_tmf *tmf;
1764 struct ibmvfc_event *evt, *found_evt;
1765 union ibmvfc_iu rsp;
1766 int rsp_rc = -EBUSY;
1767 unsigned long flags;
1768 u16 status;
1769
1770 ENTER;
1771 spin_lock_irqsave(vhost->host->host_lock, flags);
1772 found_evt = NULL;
1773 list_for_each_entry(evt, &vhost->sent, queue) {
1774 if (evt->cmnd && evt->cmnd->device == sdev) {
1775 found_evt = evt;
1776 break;
1777 }
1778 }
1779
1780 if (!found_evt) {
1781 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1782 sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
1783 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1784 return 0;
1785 }
1786
1787 if (vhost->state == IBMVFC_ACTIVE) {
1788 evt = ibmvfc_get_event(vhost);
1789 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1790
1791 tmf = &evt->iu.tmf;
1792 memset(tmf, 0, sizeof(*tmf));
1793 tmf->common.version = 1;
1794 tmf->common.opcode = IBMVFC_TMF_MAD;
1795 tmf->common.length = sizeof(*tmf);
1796 tmf->scsi_id = rport->port_id;
1797 int_to_scsilun(sdev->lun, &tmf->lun);
1798 tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
1799 tmf->cancel_key = (unsigned long)sdev->hostdata;
1800 tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);
1801
1802 evt->sync_iu = &rsp;
1803 init_completion(&evt->comp);
1804 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
1805 }
1806
1807 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1808
1809 if (rsp_rc != 0) {
1810 sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
1811 return -EIO;
1812 }
1813
1814 sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
1815
1816 wait_for_completion(&evt->comp);
1817 status = rsp.mad_common.status;
1818 spin_lock_irqsave(vhost->host->host_lock, flags);
1819 ibmvfc_free_event(evt);
1820 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1821
1822 if (status != IBMVFC_MAD_SUCCESS) {
1823 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
1824 return -EIO;
1825 }
1826
1827 sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
1828 return 0;
1829}
1830
1831/**
1832 * ibmvfc_eh_abort_handler - Abort a command
1833 * @cmd: scsi command to abort
1834 *
1835 * Returns:
1836 * SUCCESS / FAILED
1837 **/
1838static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
1839{
1840 struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
1841 struct ibmvfc_event *evt, *pos;
1842 int cancel_rc, abort_rc;
1843 unsigned long flags;
1844
1845 ENTER;
1846 ibmvfc_wait_while_resetting(vhost);
1847 cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
1848 abort_rc = ibmvfc_abort_task_set(cmd->device);
1849
1850 if (!cancel_rc && !abort_rc) {
1851 spin_lock_irqsave(vhost->host->host_lock, flags);
1852 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1853 if (evt->cmnd && evt->cmnd->device == cmd->device)
1854 ibmvfc_fail_request(evt, DID_ABORT);
1855 }
1856 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1857 LEAVE;
1858 return SUCCESS;
1859 }
1860
1861 LEAVE;
1862 return FAILED;
1863}
1864
1865/**
1866 * ibmvfc_eh_device_reset_handler - Reset a single LUN
1867 * @cmd: scsi command struct
1868 *
1869 * Returns:
1870 * SUCCESS / FAILED
1871 **/
1872static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
1873{
1874 struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
1875 struct ibmvfc_event *evt, *pos;
1876 int cancel_rc, reset_rc;
1877 unsigned long flags;
1878
1879 ENTER;
1880 ibmvfc_wait_while_resetting(vhost);
1881 cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
1882 reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");
1883
1884 if (!cancel_rc && !reset_rc) {
1885 spin_lock_irqsave(vhost->host->host_lock, flags);
1886 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1887 if (evt->cmnd && evt->cmnd->device == cmd->device)
1888 ibmvfc_fail_request(evt, DID_ABORT);
1889 }
1890 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1891 LEAVE;
1892 return SUCCESS;
1893 }
1894
1895 LEAVE;
1896 return FAILED;
1897}
1898
1899/**
1900 * ibmvfc_dev_cancel_all - Cancel all outstanding commands on a device (starget iterator)
1901 * @sdev:	scsi device struct
1902 * @data:	pointer to the accumulated return code
1903 *
1904 **/
1905static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data)
1906{
1907 unsigned long *rc = data;
1908 *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
1909}
1910
1911/**
1912 * ibmvfc_dev_abort_all - Abort the task set on a device (starget iterator)
1913 * @sdev:	scsi device struct
1914 * @data:	pointer to the accumulated return code
1915 *
1916 **/
1917static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
1918{
1919 unsigned long *rc = data;
1920 *rc |= ibmvfc_abort_task_set(sdev);
1921}
1922
1923/**
1924 * ibmvfc_eh_target_reset_handler - Reset the target
1925 * @cmd: scsi command struct
1926 *
1927 * Returns:
1928 * SUCCESS / FAILED
1929 **/
1930static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
1931{
1932 struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
1933 struct scsi_target *starget = scsi_target(cmd->device);
1934 struct ibmvfc_event *evt, *pos;
1935 int reset_rc;
1936 unsigned long cancel_rc = 0;
1937 unsigned long flags;
1938
1939 ENTER;
1940 ibmvfc_wait_while_resetting(vhost);
1941 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
1942 reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");
1943
1944 if (!cancel_rc && !reset_rc) {
1945 spin_lock_irqsave(vhost->host->host_lock, flags);
1946 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1947 if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
1948 ibmvfc_fail_request(evt, DID_ABORT);
1949 }
1950 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1951 LEAVE;
1952 return SUCCESS;
1953 }
1954
1955 LEAVE;
1956 return FAILED;
1957}
1958
1959/**
1960 * ibmvfc_eh_host_reset_handler - Reset the connection to the server
1961 * @cmd: struct scsi_cmnd having problems
1962 *
 * Returns:
 *	SUCCESS / FAILED
1963 **/
1964static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
1965{
1966 int rc;
1967 struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
1968
1969 dev_err(vhost->dev, "Resetting connection due to error recovery\n");
1970 rc = ibmvfc_issue_fc_host_lip(vhost->host);
1971 return rc ? FAILED : SUCCESS;
1972}
1973
1974/**
1975 * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
1976 * @rport: rport struct
1977 *
1978 * Return value:
1979 * none
1980 **/
1981static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
1982{
1983 struct scsi_target *starget = to_scsi_target(&rport->dev);
1984 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1985 struct ibmvfc_host *vhost = shost_priv(shost);
1986 struct ibmvfc_event *evt, *pos;
1987 unsigned long cancel_rc = 0;
1988 unsigned long abort_rc = 0;
1989 unsigned long flags;
1990
1991 ENTER;
1992 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
1993 starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
1994
1995 if (!cancel_rc && !abort_rc) {
1996 spin_lock_irqsave(shost->host_lock, flags);
1997 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1998 if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
1999 ibmvfc_fail_request(evt, DID_ABORT);
2000 }
2001 spin_unlock_irqrestore(shost->host_lock, flags);
2002 } else
2003 ibmvfc_issue_fc_host_lip(shost);
2004
2005 scsi_target_unblock(&rport->dev);
2006 LEAVE;
2007}
2008
2009static const struct {
2010 enum ibmvfc_async_event ae;
2011 const char *desc;
2012} ae_desc [] = {
2013 { IBMVFC_AE_ELS_PLOGI, "PLOGI" },
2014 { IBMVFC_AE_ELS_LOGO, "LOGO" },
2015 { IBMVFC_AE_ELS_PRLO, "PRLO" },
2016 { IBMVFC_AE_SCN_NPORT, "N-Port SCN" },
2017 { IBMVFC_AE_SCN_GROUP, "Group SCN" },
2018 { IBMVFC_AE_SCN_DOMAIN, "Domain SCN" },
2019 { IBMVFC_AE_SCN_FABRIC, "Fabric SCN" },
2020 { IBMVFC_AE_LINK_UP, "Link Up" },
2021 { IBMVFC_AE_LINK_DOWN, "Link Down" },
2022 { IBMVFC_AE_LINK_DEAD, "Link Dead" },
2023 { IBMVFC_AE_HALT, "Halt" },
2024 { IBMVFC_AE_RESUME, "Resume" },
2025 { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed" },
2026};
2027
2028static const char *unknown_ae = "Unknown async";
2029
2030/**
2031 * ibmvfc_get_ae_desc - Get text description for async event
2032 * @ae: async event
2033 *
2034 **/
2035static const char *ibmvfc_get_ae_desc(u64 ae)
2036{
2037 int i;
2038
2039 for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2040 if (ae_desc[i].ae == ae)
2041 return ae_desc[i].desc;
2042
2043 return unknown_ae;
2044}
2045
2046/**
2047 * ibmvfc_handle_async - Handle an async event from the adapter
2048 * @crq: crq to process
2049 * @vhost: ibmvfc host struct
2050 *
2051 **/
2052static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2053 struct ibmvfc_host *vhost)
2054{
2055 const char *desc = ibmvfc_get_ae_desc(crq->event);
2056
2057 ibmvfc_log(vhost, 2, "%s event received\n", desc);
2058
2059 switch (crq->event) {
2060 case IBMVFC_AE_LINK_UP:
2061 case IBMVFC_AE_RESUME:
2062 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2063 ibmvfc_init_host(vhost);
2064 break;
2065 case IBMVFC_AE_SCN_FABRIC:
2066 vhost->events_to_log |= IBMVFC_AE_RSCN;
2067 ibmvfc_init_host(vhost);
2068 break;
2069 case IBMVFC_AE_SCN_NPORT:
2070 case IBMVFC_AE_SCN_GROUP:
2071 case IBMVFC_AE_SCN_DOMAIN:
2072 vhost->events_to_log |= IBMVFC_AE_RSCN;
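		/* fall through: SCNs also trigger a host reinit */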
2073 case IBMVFC_AE_ELS_LOGO:
2074 case IBMVFC_AE_ELS_PRLO:
2075 case IBMVFC_AE_ELS_PLOGI:
2076 ibmvfc_reinit_host(vhost);
2077 break;
2078 case IBMVFC_AE_LINK_DOWN:
2079 case IBMVFC_AE_ADAPTER_FAILED:
2080 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2081 break;
2082 case IBMVFC_AE_LINK_DEAD:
2083 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2084 break;
2085 case IBMVFC_AE_HALT:
2086 ibmvfc_link_down(vhost, IBMVFC_HALTED);
2087 break;
2088 default:
2089 dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event);
2090 break;
2091	}
2092}
2093
2094/**
2095 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
2096 * @crq: Command/Response queue
2097 * @vhost: ibmvfc host struct
2098 *
2099 **/
2100static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2101{
2102 long rc;
2103 struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;
2104
2105 switch (crq->valid) {
2106 case IBMVFC_CRQ_INIT_RSP:
2107 switch (crq->format) {
2108 case IBMVFC_CRQ_INIT:
2109 dev_info(vhost->dev, "Partner initialized\n");
2110 /* Send back a response */
2111 rc = ibmvfc_send_crq_init_complete(vhost);
2112 if (rc == 0)
2113 ibmvfc_init_host(vhost);
2114 else
2115 dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
2116 break;
2117 case IBMVFC_CRQ_INIT_COMPLETE:
2118 dev_info(vhost->dev, "Partner initialization complete\n");
2119 ibmvfc_init_host(vhost);
2120 break;
2121 default:
2122 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
2123 }
2124 return;
2125 case IBMVFC_CRQ_XPORT_EVENT:
2126 vhost->state = IBMVFC_NO_CRQ;
2127 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2128 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2129 /* We need to re-setup the interpartition connection */
2130 dev_info(vhost->dev, "Re-enabling adapter\n");
2131 vhost->client_migrated = 1;
2132 ibmvfc_purge_requests(vhost, DID_REQUEUE);
2133 if ((rc = ibmvfc_reenable_crq_queue(vhost)) ||
2134 (rc = ibmvfc_send_crq_init(vhost))) {
2135 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2136 dev_err(vhost->dev, "Error after enable (rc=%ld)\n", rc);
2137 } else
2138 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2139 } else {
2140 dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
2141
2142 ibmvfc_purge_requests(vhost, DID_ERROR);
2143 if ((rc = ibmvfc_reset_crq(vhost)) ||
2144 (rc = ibmvfc_send_crq_init(vhost))) {
2145 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2146 dev_err(vhost->dev, "Error after reset (rc=%ld)\n", rc);
2147 } else
2148 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2149 }
2150 return;
2151 case IBMVFC_CRQ_CMD_RSP:
2152 break;
2153 default:
2154 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
2155 return;
2156 }
2157
2158 if (crq->format == IBMVFC_ASYNC_EVENT)
2159 return;
2160
2161 /* The only kind of payload CRQs we should get are responses to
2162 * things we send. Make sure this response is to something we
2163 * actually sent
2164 */
2165 if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
2166 dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n",
2167 crq->ioba);
2168 return;
2169 }
2170
2171 if (unlikely(atomic_read(&evt->free))) {
2172 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n",
2173 crq->ioba);
2174 return;
2175 }
2176
2177 del_timer(&evt->timer);
2178 list_del(&evt->queue);
2179 ibmvfc_trc_end(evt);
2180 evt->done(evt);
2181}
2182
2183/**
2184 * ibmvfc_scan_finished - Check if the device scan is done.
2185 * @shost: scsi host struct
2186 * @time: current elapsed time
2187 *
2188 * Returns:
2189 * 0 if scan is not done / 1 if scan is done
2190 **/
2191static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2192{
2193 unsigned long flags;
2194 struct ibmvfc_host *vhost = shost_priv(shost);
2195 int done = 0;
2196
2197 spin_lock_irqsave(shost->host_lock, flags);
2198 if (time >= (init_timeout * HZ)) {
2199 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
2200 "continuing initialization\n", init_timeout);
2201 done = 1;
2202 }
2203
2204 if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
2205 done = 1;
2206 spin_unlock_irqrestore(shost->host_lock, flags);
2207 return done;
2208}
2209
2210/**
2211 * ibmvfc_slave_alloc - Setup the device's task set value
2212 * @sdev: struct scsi_device device to configure
2213 *
2214 * Set the device's task set value so that error handling works as
2215 * expected.
2216 *
2217 * Returns:
2218 * 0 on success / -ENXIO if device does not exist
2219 **/
2220static int ibmvfc_slave_alloc(struct scsi_device *sdev)
2221{
2222 struct Scsi_Host *shost = sdev->host;
2223 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2224 struct ibmvfc_host *vhost = shost_priv(shost);
2225 unsigned long flags = 0;
2226
2227 if (!rport || fc_remote_port_chkready(rport))
2228 return -ENXIO;
2229
2230 spin_lock_irqsave(shost->host_lock, flags);
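	/* Hand out a unique task set value per device; the TMF paths above use it as the cancel key */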
2231 sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
2232 spin_unlock_irqrestore(shost->host_lock, flags);
2233 return 0;
2234}
2235
2236/**
2237 * ibmvfc_slave_configure - Configure the device
2238 * @sdev: struct scsi_device device to configure
2239 *
2240 * Enable allow_restart for a device if it is a disk. Adjust the
2241 * queue_depth here also.
2242 *
2243 * Returns:
2244 * 0
2245 **/
2246static int ibmvfc_slave_configure(struct scsi_device *sdev)
2247{
2248 struct Scsi_Host *shost = sdev->host;
2249 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2250 unsigned long flags = 0;
2251
2252 spin_lock_irqsave(shost->host_lock, flags);
2253 if (sdev->type == TYPE_DISK)
2254 sdev->allow_restart = 1;
2255
2256 if (sdev->tagged_supported) {
2257 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
2258 scsi_activate_tcq(sdev, sdev->queue_depth);
2259 } else
2260 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2261
2262 rport->dev_loss_tmo = dev_loss_tmo;
2263 spin_unlock_irqrestore(shost->host_lock, flags);
2264 return 0;
2265}
2266
2267/**
2268 * ibmvfc_change_queue_depth - Change the device's queue depth
2269 * @sdev: scsi device struct
2270 * @qdepth: depth to set
2271 *
2272 * Return value:
2273 * actual depth set
2274 **/
2275static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2276{
2277 if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
2278 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
2279
2280 scsi_adjust_queue_depth(sdev, 0, qdepth);
2281 return sdev->queue_depth;
2282}
2283
2284/**
2285 * ibmvfc_change_queue_type - Change the device's queue type
2286 * @sdev: scsi device struct
2287 * @tag_type: type of tags to use
2288 *
2289 * Return value:
2290 * actual queue type set
2291 **/
2292static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
2293{
2294 if (sdev->tagged_supported) {
2295 scsi_set_tag_type(sdev, tag_type);
2296
2297 if (tag_type)
2298 scsi_activate_tcq(sdev, sdev->queue_depth);
2299 else
2300 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2301 } else
2302 tag_type = 0;
2303
2304 return tag_type;
2305}
2306
2307static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2308 struct device_attribute *attr, char *buf)
2309{
2310 struct Scsi_Host *shost = class_to_shost(dev);
2311 struct ibmvfc_host *vhost = shost_priv(shost);
2312
2313 return snprintf(buf, PAGE_SIZE, "%s\n",
2314 vhost->login_buf->resp.partition_name);
2315}
2316
2317static struct device_attribute ibmvfc_host_partition_name = {
2318 .attr = {
2319 .name = "partition_name",
2320 .mode = S_IRUGO,
2321 },
2322 .show = ibmvfc_show_host_partition_name,
2323};
2324
2325static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2326 struct device_attribute *attr, char *buf)
2327{
2328 struct Scsi_Host *shost = class_to_shost(dev);
2329 struct ibmvfc_host *vhost = shost_priv(shost);
2330
2331 return snprintf(buf, PAGE_SIZE, "%s\n",
2332 vhost->login_buf->resp.device_name);
2333}
2334
2335static struct device_attribute ibmvfc_host_device_name = {
2336 .attr = {
2337 .name = "device_name",
2338 .mode = S_IRUGO,
2339 },
2340 .show = ibmvfc_show_host_device_name,
2341};
2342
2343static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2344 struct device_attribute *attr, char *buf)
2345{
2346 struct Scsi_Host *shost = class_to_shost(dev);
2347 struct ibmvfc_host *vhost = shost_priv(shost);
2348
2349 return snprintf(buf, PAGE_SIZE, "%s\n",
2350 vhost->login_buf->resp.port_loc_code);
2351}
2352
2353static struct device_attribute ibmvfc_host_loc_code = {
2354 .attr = {
2355 .name = "port_loc_code",
2356 .mode = S_IRUGO,
2357 },
2358 .show = ibmvfc_show_host_loc_code,
2359};
2360
2361static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2362 struct device_attribute *attr, char *buf)
2363{
2364 struct Scsi_Host *shost = class_to_shost(dev);
2365 struct ibmvfc_host *vhost = shost_priv(shost);
2366
2367 return snprintf(buf, PAGE_SIZE, "%s\n",
2368 vhost->login_buf->resp.drc_name);
2369}
2370
2371static struct device_attribute ibmvfc_host_drc_name = {
2372 .attr = {
2373 .name = "drc_name",
2374 .mode = S_IRUGO,
2375 },
2376 .show = ibmvfc_show_host_drc_name,
2377};
2378
2379static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2380 struct device_attribute *attr, char *buf)
2381{
2382 struct Scsi_Host *shost = class_to_shost(dev);
2383 struct ibmvfc_host *vhost = shost_priv(shost);
2384 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2385}
2386
2387static struct device_attribute ibmvfc_host_npiv_version = {
2388 .attr = {
2389 .name = "npiv_version",
2390 .mode = S_IRUGO,
2391 },
2392 .show = ibmvfc_show_host_npiv_version,
2393};
2394
2395/**
2396 * ibmvfc_show_log_level - Show the adapter's error logging level
2397 * @dev: class device struct
2398 * @buf: buffer
2399 *
2400 * Return value:
2401 * number of bytes printed to buffer
2402 **/
2403static ssize_t ibmvfc_show_log_level(struct device *dev,
2404 struct device_attribute *attr, char *buf)
2405{
2406 struct Scsi_Host *shost = class_to_shost(dev);
2407 struct ibmvfc_host *vhost = shost_priv(shost);
2408 unsigned long flags = 0;
2409 int len;
2410
2411 spin_lock_irqsave(shost->host_lock, flags);
2412 len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
2413 spin_unlock_irqrestore(shost->host_lock, flags);
2414 return len;
2415}
2416
2417/**
2418 * ibmvfc_store_log_level - Change the adapter's error logging level
2419 * @dev: class device struct
2420 * @buf: buffer
2421 *
2422 * Return value:
2423 * 	number of bytes consumed from the buffer
2424 **/
2425static ssize_t ibmvfc_store_log_level(struct device *dev,
2426 struct device_attribute *attr,
2427 const char *buf, size_t count)
2428{
2429 struct Scsi_Host *shost = class_to_shost(dev);
2430 struct ibmvfc_host *vhost = shost_priv(shost);
2431 unsigned long flags = 0;
2432
2433 spin_lock_irqsave(shost->host_lock, flags);
2434 vhost->log_level = simple_strtoul(buf, NULL, 10);
2435 spin_unlock_irqrestore(shost->host_lock, flags);
2436 return strlen(buf);
2437}
2438
2439static struct device_attribute ibmvfc_log_level_attr = {
2440 .attr = {
2441 .name = "log_level",
2442 .mode = S_IRUGO | S_IWUSR,
2443 },
2444 .show = ibmvfc_show_log_level,
2445 .store = ibmvfc_store_log_level
2446};
2447
2448#ifdef CONFIG_SCSI_IBMVFC_TRACE
2449/**
2450 * ibmvfc_read_trace - Dump the adapter trace
2451 * @kobj: kobject struct
2452 * @bin_attr: bin_attribute struct
2453 * @buf: buffer
2454 * @off: offset
2455 * @count: buffer size
2456 *
2457 * Return value:
2458 * 	number of bytes read into the buffer
2459 **/
2460static ssize_t ibmvfc_read_trace(struct kobject *kobj,
2461 struct bin_attribute *bin_attr,
2462 char *buf, loff_t off, size_t count)
2463{
2464 struct device *dev = container_of(kobj, struct device, kobj);
2465 struct Scsi_Host *shost = class_to_shost(dev);
2466 struct ibmvfc_host *vhost = shost_priv(shost);
2467 unsigned long flags = 0;
2468 int size = IBMVFC_TRACE_SIZE;
2469 char *src = (char *)vhost->trace;
2470
2471 if (off > size)
2472 return 0;
2473 if (off + count > size) {
2474 size -= off;
2475 count = size;
2476 }
2477
2478 spin_lock_irqsave(shost->host_lock, flags);
2479 memcpy(buf, &src[off], count);
2480 spin_unlock_irqrestore(shost->host_lock, flags);
2481 return count;
2482}
2483
2484static struct bin_attribute ibmvfc_trace_attr = {
2485 .attr = {
2486 .name = "trace",
2487 .mode = S_IRUGO,
2488 },
2489 .size = 0,
2490 .read = ibmvfc_read_trace,
2491};
2492#endif
2493
2494static struct device_attribute *ibmvfc_attrs[] = {
2495 &ibmvfc_host_partition_name,
2496 &ibmvfc_host_device_name,
2497 &ibmvfc_host_loc_code,
2498 &ibmvfc_host_drc_name,
2499 &ibmvfc_host_npiv_version,
2500 &ibmvfc_log_level_attr,
2501 NULL
2502};
2503
2504static struct scsi_host_template driver_template = {
2505 .module = THIS_MODULE,
2506 .name = "IBM POWER Virtual FC Adapter",
2507 .proc_name = IBMVFC_NAME,
2508 .queuecommand = ibmvfc_queuecommand,
2509 .eh_abort_handler = ibmvfc_eh_abort_handler,
2510 .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
2511 .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
2512 .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
2513 .slave_alloc = ibmvfc_slave_alloc,
2514 .slave_configure = ibmvfc_slave_configure,
2515 .scan_finished = ibmvfc_scan_finished,
2516 .change_queue_depth = ibmvfc_change_queue_depth,
2517 .change_queue_type = ibmvfc_change_queue_type,
2518 .cmd_per_lun = 16,
2519 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
2520 .this_id = -1,
2521 .sg_tablesize = SG_ALL,
2522 .max_sectors = IBMVFC_MAX_SECTORS,
2523 .use_clustering = ENABLE_CLUSTERING,
2524 .shost_attrs = ibmvfc_attrs,
2525};
2526
2527/**
2528 * ibmvfc_next_async_crq - Returns the next entry in async queue
2529 * @vhost: ibmvfc host struct
2530 *
2531 * Returns:
2532 * Pointer to next entry in queue / NULL if empty
2533 **/
2534static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
2535{
2536 struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
2537 struct ibmvfc_async_crq *crq;
2538
2539 crq = &async_crq->msgs[async_crq->cur];
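	/* The high bit of the valid byte is set by the firmware once the entry holds a new message */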
2540 if (crq->valid & 0x80) {
2541 if (++async_crq->cur == async_crq->size)
2542 async_crq->cur = 0;
2543 } else
2544 crq = NULL;
2545
2546 return crq;
2547}
2548
2549/**
2550 * ibmvfc_next_crq - Returns the next entry in message queue
2551 * @vhost: ibmvfc host struct
2552 *
2553 * Returns:
2554 * Pointer to next entry in queue / NULL if empty
2555 **/
2556static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
2557{
2558 struct ibmvfc_crq_queue *queue = &vhost->crq;
2559 struct ibmvfc_crq *crq;
2560
2561 crq = &queue->msgs[queue->cur];
2562 if (crq->valid & 0x80) {
2563 if (++queue->cur == queue->size)
2564 queue->cur = 0;
2565 } else
2566 crq = NULL;
2567
2568 return crq;
2569}
2570
2571/**
2572 * ibmvfc_interrupt - Interrupt handler
2573 * @irq: number of irq to handle, not used
2574 * @dev_instance: ibmvfc_host that received interrupt
2575 *
2576 * Returns:
2577 * IRQ_HANDLED
2578 **/
2579static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
2580{
2581 struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
2582 struct vio_dev *vdev = to_vio_dev(vhost->dev);
2583 struct ibmvfc_crq *crq;
2584 struct ibmvfc_async_crq *async;
2585 unsigned long flags;
2586 int done = 0;
2587
2588 spin_lock_irqsave(vhost->host->host_lock, flags);
2589 vio_disable_interrupts(to_vio_dev(vhost->dev));
2590 while (!done) {
2591 /* Pull all the valid messages off the CRQ */
2592 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2593 ibmvfc_handle_crq(crq, vhost);
2594 crq->valid = 0;
2595 }
2596
2597 /* Pull all the valid messages off the async CRQ */
2598 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2599 ibmvfc_handle_async(async, vhost);
2600 async->valid = 0;
2601 }
2602
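		/*
		 * Re-enable interrupts, then poll both queues one last time:
		 * a message that arrived after the loops above would otherwise
		 * sit unnoticed until the next interrupt.
		 */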
2603 vio_enable_interrupts(vdev);
2604 if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2605 vio_disable_interrupts(vdev);
2606 ibmvfc_handle_crq(crq, vhost);
2607 crq->valid = 0;
2608 } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2609 vio_disable_interrupts(vdev);
2610 ibmvfc_handle_async(async, vhost);
2611			async->valid = 0;
2612 } else
2613 done = 1;
2614 }
2615
2616 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2617 return IRQ_HANDLED;
2618}
2619
2620/**
2621 * ibmvfc_init_tgt - Set the next init job step for the target
2622 * @tgt: ibmvfc target struct
2623 * @job_step: job step to perform
2624 *
2625 **/
2626static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
2627 void (*job_step) (struct ibmvfc_target *))
2628{
2629 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
2630 tgt->job_step = job_step;
2631 wake_up(&tgt->vhost->work_wait_q);
2632}
2633
2634/**
2635 * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
2636 * @tgt: ibmvfc target struct
2637 * @job_step: initialization job step
2638 *
2639 **/
2640static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2641 void (*job_step) (struct ibmvfc_target *))
2642{
2643 if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
2644 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2645 wake_up(&tgt->vhost->work_wait_q);
2646 } else
2647 ibmvfc_init_tgt(tgt, job_step);
2648}
2649
2650/**
2651 * ibmvfc_release_tgt - Free memory allocated for a target
2652 * @kref: kref struct
2653 *
2654 **/
2655static void ibmvfc_release_tgt(struct kref *kref)
2656{
2657 struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
2658 kfree(tgt);
2659}
2660
2661/**
2662 * ibmvfc_tgt_prli_done - Completion handler for Process Login
2663 * @evt: ibmvfc event struct
2664 *
2665 **/
2666static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2667{
2668 struct ibmvfc_target *tgt = evt->tgt;
2669 struct ibmvfc_host *vhost = evt->vhost;
2670 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
2671 u32 status = rsp->common.status;
2672
2673 vhost->discovery_threads--;
2674 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2675 switch (status) {
2676 case IBMVFC_MAD_SUCCESS:
2677 tgt_dbg(tgt, "Process Login succeeded\n");
2678 tgt->need_login = 0;
2679 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
2680 break;
2681 case IBMVFC_MAD_DRIVER_FAILED:
2682 break;
2683 case IBMVFC_MAD_CRQ_ERROR:
2684 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2685 break;
2686 case IBMVFC_MAD_FAILED:
2687 default:
2688 tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2689 ibmvfc_get_cmd_error(rsp->status, rsp->error),
2690 rsp->status, rsp->error, status);
2691 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2692 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2693 break;
2694	}
2695
2696 kref_put(&tgt->kref, ibmvfc_release_tgt);
2697 ibmvfc_free_event(evt);
2698 wake_up(&vhost->work_wait_q);
2699}
2700
2701/**
2702 * ibmvfc_tgt_send_prli - Send a process login
2703 * @tgt: ibmvfc target struct
2704 *
2705 **/
2706static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
2707{
2708 struct ibmvfc_process_login *prli;
2709 struct ibmvfc_host *vhost = tgt->vhost;
2710 struct ibmvfc_event *evt;
2711
2712 if (vhost->discovery_threads >= disc_threads)
2713 return;
2714
2715 kref_get(&tgt->kref);
2716 evt = ibmvfc_get_event(vhost);
2717 vhost->discovery_threads++;
2718 ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
2719 evt->tgt = tgt;
2720 prli = &evt->iu.prli;
2721 memset(prli, 0, sizeof(*prli));
2722 prli->common.version = 1;
2723 prli->common.opcode = IBMVFC_PROCESS_LOGIN;
2724 prli->common.length = sizeof(*prli);
2725 prli->scsi_id = tgt->scsi_id;
2726
2727 prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
2728 prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
2729 prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;
2730
2731 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2732 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2733 vhost->discovery_threads--;
2734 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2735 kref_put(&tgt->kref, ibmvfc_release_tgt);
2736 } else
2737 tgt_dbg(tgt, "Sent process login\n");
2738}
2739
2740/**
2741 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
2742 * @evt: ibmvfc event struct
2743 *
2744 **/
2745static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2746{
2747 struct ibmvfc_target *tgt = evt->tgt;
2748 struct ibmvfc_host *vhost = evt->vhost;
2749 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
2750 u32 status = rsp->common.status;
2751
2752 vhost->discovery_threads--;
2753 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2754 switch (status) {
2755 case IBMVFC_MAD_SUCCESS:
2756 tgt_dbg(tgt, "Port Login succeeded\n");
2757 if (tgt->ids.port_name &&
2758 tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
2759 vhost->reinit = 1;
2760 tgt_dbg(tgt, "Port re-init required\n");
2761 break;
2762 }
2763 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
2764 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
2765 tgt->ids.port_id = tgt->scsi_id;
2766 tgt->ids.roles = FC_PORT_ROLE_FCP_TARGET;
2767 memcpy(&tgt->service_parms, &rsp->service_parms,
2768 sizeof(tgt->service_parms));
2769 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
2770 sizeof(tgt->service_parms_change));
2771 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
2772 break;
2773 case IBMVFC_MAD_DRIVER_FAILED:
2774 break;
2775 case IBMVFC_MAD_CRQ_ERROR:
2776 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2777 break;
2778 case IBMVFC_MAD_FAILED:
2779 default:
2780 tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2781 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
2782 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
2783 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2784
2785 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2786 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2787 break;
2788	}
2789
2790 kref_put(&tgt->kref, ibmvfc_release_tgt);
2791 ibmvfc_free_event(evt);
2792 wake_up(&vhost->work_wait_q);
2793}
2794
2795/**
2796 * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
2797 * @tgt: ibmvfc target struct
2798 *
2799 **/
2800static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
2801{
2802 struct ibmvfc_port_login *plogi;
2803 struct ibmvfc_host *vhost = tgt->vhost;
2804 struct ibmvfc_event *evt;
2805
2806 if (vhost->discovery_threads >= disc_threads)
2807 return;
2808
2809 kref_get(&tgt->kref);
2810 evt = ibmvfc_get_event(vhost);
2811 vhost->discovery_threads++;
2812 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2813 ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
2814 evt->tgt = tgt;
2815 plogi = &evt->iu.plogi;
2816 memset(plogi, 0, sizeof(*plogi));
2817 plogi->common.version = 1;
2818 plogi->common.opcode = IBMVFC_PORT_LOGIN;
2819 plogi->common.length = sizeof(*plogi);
2820 plogi->scsi_id = tgt->scsi_id;
2821
2822 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2823 vhost->discovery_threads--;
2824 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2825 kref_put(&tgt->kref, ibmvfc_release_tgt);
2826 } else
2827 tgt_dbg(tgt, "Sent port login\n");
2828}
2829
2830/**
2831 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
2832 * @evt: ibmvfc event struct
2833 *
2834 **/
2835static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
2836{
2837 struct ibmvfc_target *tgt = evt->tgt;
2838 struct ibmvfc_host *vhost = evt->vhost;
2839 struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
2840 u32 status = rsp->common.status;
2841
2842 vhost->discovery_threads--;
2843 ibmvfc_free_event(evt);
2844 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2845
2846 switch (status) {
2847 case IBMVFC_MAD_SUCCESS:
2848 tgt_dbg(tgt, "Implicit Logout succeeded\n");
2849 break;
2850 case IBMVFC_MAD_DRIVER_FAILED:
2851 kref_put(&tgt->kref, ibmvfc_release_tgt);
2852 wake_up(&vhost->work_wait_q);
2853 return;
2854 case IBMVFC_MAD_FAILED:
2855 default:
2856 tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
2857 break;
2858	}
2859
2860 if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
2861 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
2862 else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
2863 tgt->scsi_id != tgt->new_scsi_id)
2864 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2865 kref_put(&tgt->kref, ibmvfc_release_tgt);
2866 wake_up(&vhost->work_wait_q);
2867}
2868
2869/**
2870 * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
2871 * @tgt: ibmvfc target struct
2872 *
2873 **/
2874static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
2875{
2876 struct ibmvfc_implicit_logout *mad;
2877 struct ibmvfc_host *vhost = tgt->vhost;
2878 struct ibmvfc_event *evt;
2879
2880 if (vhost->discovery_threads >= disc_threads)
2881 return;
2882
2883 kref_get(&tgt->kref);
2884 evt = ibmvfc_get_event(vhost);
2885 vhost->discovery_threads++;
2886 ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
2887 evt->tgt = tgt;
2888 mad = &evt->iu.implicit_logout;
2889 memset(mad, 0, sizeof(*mad));
2890 mad->common.version = 1;
2891 mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
2892 mad->common.length = sizeof(*mad);
2893 mad->old_scsi_id = tgt->scsi_id;
2894
2895 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2896 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2897 vhost->discovery_threads--;
2898 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2899 kref_put(&tgt->kref, ibmvfc_release_tgt);
2900 } else
2901 tgt_dbg(tgt, "Sent Implicit Logout\n");
2902}
2903
2904/**
2905 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
2906 * @evt: ibmvfc event struct
2907 *
2908 **/
2909static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
2910{
2911 struct ibmvfc_target *tgt = evt->tgt;
2912 struct ibmvfc_host *vhost = evt->vhost;
2913 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
2914 u32 status = rsp->common.status;
2915
2916 vhost->discovery_threads--;
2917 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2918 switch (status) {
2919 case IBMVFC_MAD_SUCCESS:
2920 tgt_dbg(tgt, "Query Target succeeded\n");
2921 tgt->new_scsi_id = rsp->scsi_id;
2922 if (rsp->scsi_id != tgt->scsi_id)
2923 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
2924 break;
2925 case IBMVFC_MAD_DRIVER_FAILED:
2926 break;
2927 case IBMVFC_MAD_CRQ_ERROR:
2928 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
2929 break;
2930 case IBMVFC_MAD_FAILED:
2931 default:
2932 tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2933 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
2934 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
2935 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
2936
2937 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
2938 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
2939 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
2940 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2941 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2942 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
2943 break;
2944	}
2945
2946 kref_put(&tgt->kref, ibmvfc_release_tgt);
2947 ibmvfc_free_event(evt);
2948 wake_up(&vhost->work_wait_q);
2949}
2950
2951/**
2952 * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
2953 * @tgt: ibmvfc target struct
2954 *
2955 **/
2956static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
2957{
2958 struct ibmvfc_query_tgt *query_tgt;
2959 struct ibmvfc_host *vhost = tgt->vhost;
2960 struct ibmvfc_event *evt;
2961
2962 if (vhost->discovery_threads >= disc_threads)
2963 return;
2964
2965 kref_get(&tgt->kref);
2966 evt = ibmvfc_get_event(vhost);
2967 vhost->discovery_threads++;
2968 evt->tgt = tgt;
2969 ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
2970 query_tgt = &evt->iu.query_tgt;
2971 memset(query_tgt, 0, sizeof(*query_tgt));
2972 query_tgt->common.version = 1;
2973 query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
2974 query_tgt->common.length = sizeof(*query_tgt);
2975 query_tgt->wwpn = tgt->ids.port_name;
2976
2977 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2978 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2979 vhost->discovery_threads--;
2980 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2981 kref_put(&tgt->kref, ibmvfc_release_tgt);
2982 } else
2983 tgt_dbg(tgt, "Sent Query Target\n");
2984}
2985
2986/**
2987 * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
2988 * @vhost: ibmvfc host struct
2989 * @scsi_id: SCSI ID to allocate target for
2990 *
2991 * Returns:
2992 * 0 on success / other on failure
2993 **/
2994static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
2995{
2996 struct ibmvfc_target *tgt;
2997 unsigned long flags;
2998
2999 spin_lock_irqsave(vhost->host->host_lock, flags);
3000 list_for_each_entry(tgt, &vhost->targets, queue) {
3001 if (tgt->scsi_id == scsi_id) {
3002 if (tgt->need_login)
3003 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3004 goto unlock_out;
3005 }
3006 }
3007 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3008
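	/*
	 * GFP_KERNEL allocation may sleep, so the host lock is dropped first;
	 * targets are only allocated from the work thread (see
	 * IBMVFC_HOST_ACTION_ALLOC_TGTS in ibmvfc_do_work), so the list cannot
	 * change underneath us.
	 */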
3009 tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
3010 if (!tgt) {
3011 dev_err(vhost->dev, "Target allocation failure for scsi id %08lx\n",
3012 scsi_id);
3013 return -ENOMEM;
3014 }
3015
3016 tgt->scsi_id = scsi_id;
3017 tgt->new_scsi_id = scsi_id;
3018 tgt->vhost = vhost;
3019 tgt->need_login = 1;
3020 kref_init(&tgt->kref);
3021 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3022 spin_lock_irqsave(vhost->host->host_lock, flags);
3023 list_add_tail(&tgt->queue, &vhost->targets);
3024
3025unlock_out:
3026 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3027 return 0;
3028}
3029
3030/**
3031 * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
3032 * @vhost: ibmvfc host struct
3033 *
3034 * Returns:
3035 * 0 on success / other on failure
3036 **/
3037static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
3038{
3039 int i, rc;
3040
3041 for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
3042 rc = ibmvfc_alloc_target(vhost,
3043 vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
3044
3045 return rc;
3046}
3047
3048/**
3049 * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
3050 * @evt: ibmvfc event struct
3051 *
3052 **/
3053static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3054{
3055 struct ibmvfc_host *vhost = evt->vhost;
3056 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
3057 u32 mad_status = rsp->common.status;
3058
3059 switch (mad_status) {
3060 case IBMVFC_MAD_SUCCESS:
3061 ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
3062 vhost->num_targets = rsp->num_written;
3063 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
3064 break;
3065 case IBMVFC_MAD_FAILED:
3066 dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
3067 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3068 ibmvfc_retry_host_init(vhost);
3069 break;
3070 case IBMVFC_MAD_DRIVER_FAILED:
3071 break;
3072 default:
3073 dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
3074 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3075 break;
3076 }
3077
3078 ibmvfc_free_event(evt);
3079 wake_up(&vhost->work_wait_q);
3080}
3081
3082/**
3083 * ibmvfc_discover_targets - Send Discover Targets MAD
3084 * @vhost: ibmvfc host struct
3085 *
3086 **/
3087static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
3088{
3089 struct ibmvfc_discover_targets *mad;
3090 struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
3091
3092 ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
3093 mad = &evt->iu.discover_targets;
3094 memset(mad, 0, sizeof(*mad));
3095 mad->common.version = 1;
3096 mad->common.opcode = IBMVFC_DISC_TARGETS;
3097 mad->common.length = sizeof(*mad);
3098 mad->bufflen = vhost->disc_buf_sz;
3099 mad->buffer.va = vhost->disc_buf_dma;
3100 mad->buffer.len = vhost->disc_buf_sz;
3101 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
3102
3103 if (!ibmvfc_send_event(evt, vhost, default_timeout))
3104 ibmvfc_dbg(vhost, "Sent discover targets\n");
3105 else
3106 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3107}
3108
3109/**
3110 * ibmvfc_npiv_login_done - Completion handler for NPIV Login
3111 * @evt: ibmvfc event struct
3112 *
3113 **/
3114static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3115{
3116 struct ibmvfc_host *vhost = evt->vhost;
3117 u32 mad_status = evt->xfer_iu->npiv_login.common.status;
3118 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
3119 unsigned int npiv_max_sectors;
3120
3121 switch (mad_status) {
3122 case IBMVFC_MAD_SUCCESS:
3123 ibmvfc_free_event(evt);
3124 break;
3125 case IBMVFC_MAD_FAILED:
3126 dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
3127 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3128 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3129 ibmvfc_retry_host_init(vhost);
3130 else
3131 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3132 ibmvfc_free_event(evt);
3133 return;
3134 case IBMVFC_MAD_CRQ_ERROR:
3135 ibmvfc_retry_host_init(vhost);
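		/* fall through to free the event */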
3136 case IBMVFC_MAD_DRIVER_FAILED:
3137 ibmvfc_free_event(evt);
3138 return;
3139 default:
3140 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
3141 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3142 ibmvfc_free_event(evt);
3143 return;
3144 }
3145
3146 vhost->client_migrated = 0;
3147
3148 if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
3149 dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
3150 rsp->flags);
3151 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3152 wake_up(&vhost->work_wait_q);
3153 return;
3154 }
3155
3156 if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
3157 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
3158 rsp->max_cmds);
3159 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3160 wake_up(&vhost->work_wait_q);
3161 return;
3162 }
3163
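	/* max_dma_len is in bytes; shift right by 9 to convert to 512-byte sectors */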
3164 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
3165 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
3166 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
3167 rsp->drc_name, npiv_max_sectors);
3168
3169 fc_host_fabric_name(vhost->host) = rsp->node_name;
3170 fc_host_node_name(vhost->host) = rsp->node_name;
3171 fc_host_port_name(vhost->host) = rsp->port_name;
3172 fc_host_port_id(vhost->host) = rsp->scsi_id;
3173 fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
3174 fc_host_supported_classes(vhost->host) = 0;
3175 if (rsp->service_parms.class1_parms[0] & 0x80000000)
3176 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
3177 if (rsp->service_parms.class2_parms[0] & 0x80000000)
3178 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
3179 if (rsp->service_parms.class3_parms[0] & 0x80000000)
3180 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
3181 fc_host_maxframe_size(vhost->host) =
3182 rsp->service_parms.common.bb_rcv_sz & 0x0fff;
3183
3184 vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
3185 vhost->host->max_sectors = npiv_max_sectors;
3186 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3187 wake_up(&vhost->work_wait_q);
3188}
3189
3190/**
3191 * ibmvfc_npiv_login - Sends NPIV login
3192 * @vhost: ibmvfc host struct
3193 *
3194 **/
3195static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3196{
3197 struct ibmvfc_npiv_login_mad *mad;
3198 struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
3199
3200 ibmvfc_gather_partition_info(vhost);
3201 ibmvfc_set_login_info(vhost);
3202 ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
3203
3204 memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
3205 mad = &evt->iu.npiv_login;
3206 memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
3207 mad->common.version = 1;
3208 mad->common.opcode = IBMVFC_NPIV_LOGIN;
3209 mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
3210 mad->buffer.va = vhost->login_buf_dma;
3211 mad->buffer.len = sizeof(*vhost->login_buf);
3212
3213 memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
3214 vhost->async_crq.cur = 0;
3215 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
3216
3217 if (!ibmvfc_send_event(evt, vhost, default_timeout))
3218 ibmvfc_dbg(vhost, "Sent NPIV login\n");
3219 else
3220 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3221}
3222
3223/**
3224 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
3225 * @vhost: ibmvfc host struct
3226 *
3227 * Returns:
3228 * 1 if work to do / 0 if not
3229 **/
3230static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
3231{
3232 struct ibmvfc_target *tgt;
3233
3234 list_for_each_entry(tgt, &vhost->targets, queue) {
3235 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
3236 tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3237 return 1;
3238 }
3239
3240 return 0;
3241}
3242
3243/**
3244 * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
3245 * @vhost: ibmvfc host struct
3246 *
3247 * Returns:
3248 * 1 if work to do / 0 if not
3249 **/
3250static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3251{
3252 struct ibmvfc_target *tgt;
3253
3254 if (kthread_should_stop())
3255 return 1;
3256 switch (vhost->action) {
3257 case IBMVFC_HOST_ACTION_NONE:
3258 case IBMVFC_HOST_ACTION_INIT_WAIT:
3259 return 0;
3260 case IBMVFC_HOST_ACTION_TGT_INIT:
3261 case IBMVFC_HOST_ACTION_QUERY_TGTS:
3262 if (vhost->discovery_threads == disc_threads)
3263 return 0;
3264 list_for_each_entry(tgt, &vhost->targets, queue)
3265 if (tgt->action == IBMVFC_TGT_ACTION_INIT)
3266 return 1;
3267 list_for_each_entry(tgt, &vhost->targets, queue)
3268 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3269 return 0;
3270 return 1;
3271 case IBMVFC_HOST_ACTION_INIT:
3272 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3273 case IBMVFC_HOST_ACTION_TGT_ADD:
3274 case IBMVFC_HOST_ACTION_TGT_DEL:
3275 case IBMVFC_HOST_ACTION_QUERY:
3276 default:
3277 break;
3278	}
3279
3280 return 1;
3281}
3282
3283/**
3284 * ibmvfc_work_to_do - Is there task level work to do?
3285 * @vhost: ibmvfc host struct
3286 *
3287 * Returns:
3288 * 1 if work to do / 0 if not
3289 **/
3290static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3291{
3292 unsigned long flags;
3293 int rc;
3294
3295 spin_lock_irqsave(vhost->host->host_lock, flags);
3296 rc = __ibmvfc_work_to_do(vhost);
3297 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3298 return rc;
3299}
3300
3301/**
3302 * ibmvfc_log_ae - Log async events if necessary
3303 * @vhost: ibmvfc host struct
3304 * @events: events to log
3305 *
3306 **/
3307static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
3308{
3309 if (events & IBMVFC_AE_RSCN)
3310 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
3311 if ((events & IBMVFC_AE_LINKDOWN) &&
3312 vhost->state >= IBMVFC_HALTED)
3313 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
3314 if ((events & IBMVFC_AE_LINKUP) &&
3315 vhost->state == IBMVFC_INITIALIZING)
3316 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
3317}
3318
3319/**
3320 * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
3321 * @tgt: ibmvfc target struct
3322 *
3323 **/
3324static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3325{
3326 struct ibmvfc_host *vhost = tgt->vhost;
3327 struct fc_rport *rport;
3328 unsigned long flags;
3329
3330 tgt_dbg(tgt, "Adding rport\n");
3331 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3332 spin_lock_irqsave(vhost->host->host_lock, flags);
3333 tgt->rport = rport;
3334 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3335 if (rport) {
3336 tgt_dbg(tgt, "rport add succeeded\n");
3337 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
3338 rport->supported_classes = 0;
3339 if (tgt->service_parms.class1_parms[0] & 0x80000000)
3340 rport->supported_classes |= FC_COS_CLASS1;
3341 if (tgt->service_parms.class2_parms[0] & 0x80000000)
3342 rport->supported_classes |= FC_COS_CLASS2;
3343 if (tgt->service_parms.class3_parms[0] & 0x80000000)
3344 rport->supported_classes |= FC_COS_CLASS3;
3345 } else
3346 tgt_dbg(tgt, "rport add failed\n");
3347 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3348}
3349
3350/**
3351 * ibmvfc_do_work - Do task level work
3352 * @vhost: ibmvfc host struct
3353 *
3354 **/
3355static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3356{
3357 struct ibmvfc_target *tgt;
3358 unsigned long flags;
3359 struct fc_rport *rport;
3360
3361 ibmvfc_log_ae(vhost, vhost->events_to_log);
3362 spin_lock_irqsave(vhost->host->host_lock, flags);
3363 vhost->events_to_log = 0;
3364 switch (vhost->action) {
3365 case IBMVFC_HOST_ACTION_NONE:
3366 case IBMVFC_HOST_ACTION_INIT_WAIT:
3367 break;
3368 case IBMVFC_HOST_ACTION_INIT:
3369 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
3370 vhost->job_step(vhost);
3371 break;
3372 case IBMVFC_HOST_ACTION_QUERY:
3373 list_for_each_entry(tgt, &vhost->targets, queue)
3374 ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
3375 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
3376 break;
3377 case IBMVFC_HOST_ACTION_QUERY_TGTS:
3378 list_for_each_entry(tgt, &vhost->targets, queue) {
3379 if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
3380 tgt->job_step(tgt);
3381 break;
3382 }
3383 }
3384
3385 if (!ibmvfc_dev_init_to_do(vhost))
3386 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
3387 break;
3388 case IBMVFC_HOST_ACTION_TGT_DEL:
3389 list_for_each_entry(tgt, &vhost->targets, queue) {
3390 if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3391 tgt_dbg(tgt, "Deleting rport\n");
3392 rport = tgt->rport;
3393 tgt->rport = NULL;
3394 list_del(&tgt->queue);
3395 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3396 if (rport)
3397 fc_remote_port_delete(rport);
3398 kref_put(&tgt->kref, ibmvfc_release_tgt);
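				/* The host lock was dropped above, so return and let the work thread rescan the list */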
3399 return;
3400 }
3401 }
3402
3403 if (vhost->state == IBMVFC_INITIALIZING) {
3404 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
3405 vhost->job_step = ibmvfc_discover_targets;
3406 } else {
3407 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3408 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3409 scsi_unblock_requests(vhost->host);
3410 wake_up(&vhost->init_wait_q);
3411 return;
3412 }
3413 break;
3414 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3415 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
3416 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3417 ibmvfc_alloc_targets(vhost);
3418 spin_lock_irqsave(vhost->host->host_lock, flags);
3419 break;
3420 case IBMVFC_HOST_ACTION_TGT_INIT:
3421 list_for_each_entry(tgt, &vhost->targets, queue) {
3422 if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
3423 tgt->job_step(tgt);
3424 break;
3425 }
3426 }
3427
3428 if (!ibmvfc_dev_init_to_do(vhost)) {
3429 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
3430 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
3431 vhost->init_retries = 0;
3432 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3433 scsi_unblock_requests(vhost->host);
3434 return;
3435 }
3436 break;
3437 case IBMVFC_HOST_ACTION_TGT_ADD:
3438 list_for_each_entry(tgt, &vhost->targets, queue) {
3439 if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
3440 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3441 ibmvfc_tgt_add_rport(tgt);
3442 return;
3443 } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3444 tgt_dbg(tgt, "Deleting rport\n");
3445 rport = tgt->rport;
3446 tgt->rport = NULL;
3447 list_del(&tgt->queue);
3448 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3449 if (rport)
3450 fc_remote_port_delete(rport);
3451 kref_put(&tgt->kref, ibmvfc_release_tgt);
3452 return;
3453 }
3454 }
3455
3456 if (vhost->reinit) {
3457 vhost->reinit = 0;
3458 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3459 } else {
3460 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3461 wake_up(&vhost->init_wait_q);
3462 }
3463 break;
3464 default:
3465 break;
3466	}
3467
3468 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3469}
3470
3471/**
3472 * ibmvfc_work - Do task level work
3473 * @data: ibmvfc host struct
3474 *
3475 * Returns:
3476 * zero
3477 **/
3478static int ibmvfc_work(void *data)
3479{
3480 struct ibmvfc_host *vhost = data;
3481 int rc;
3482
3483 set_user_nice(current, -20);
3484
3485 while (1) {
3486 rc = wait_event_interruptible(vhost->work_wait_q,
3487 ibmvfc_work_to_do(vhost));
3488
3489 BUG_ON(rc);
3490
3491 if (kthread_should_stop())
3492 break;
3493
3494 ibmvfc_do_work(vhost);
3495 }
3496
3497 ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
3498 return 0;
3499}
3500
3501/**
3502 * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
3503 * @vhost: ibmvfc host struct
3504 *
3505 * Allocates a page for messages, maps it for dma, and registers
3506 * the crq with the hypervisor.
3507 *
3508 * Return value:
3509 * zero on success / other on failure
3510 **/
3511static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
3512{
3513 int rc, retrc = -ENOMEM;
3514 struct device *dev = vhost->dev;
3515 struct vio_dev *vdev = to_vio_dev(dev);
3516 struct ibmvfc_crq_queue *crq = &vhost->crq;
3517
3518 ENTER;
3519 crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
3520
3521 if (!crq->msgs)
3522 return -ENOMEM;
3523
3524 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3525 crq->msg_token = dma_map_single(dev, crq->msgs,
3526 PAGE_SIZE, DMA_BIDIRECTIONAL);
3527
3528 if (dma_mapping_error(crq->msg_token))
3529 goto map_failed;
3530
3531 retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3532 crq->msg_token, PAGE_SIZE);
3533
3534 if (rc == H_RESOURCE)
3535 /* maybe kexecing and resource is busy. try a reset */
3536 retrc = rc = ibmvfc_reset_crq(vhost);
3537
3538 if (rc == H_CLOSED)
3539 dev_warn(dev, "Partner adapter not ready\n");
3540 else if (rc) {
3541 dev_warn(dev, "Error %d opening adapter\n", rc);
3542 goto reg_crq_failed;
3543 }
3544
3545 retrc = 0;
3546
3547 if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
3548 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
3549 goto req_irq_failed;
3550 }
3551
3552 if ((rc = vio_enable_interrupts(vdev))) {
3553 dev_err(dev, "Error %d enabling interrupts\n", rc);
3554 goto req_irq_failed;
3555 }
3556
3557 crq->cur = 0;
3558 LEAVE;
3559 return retrc;
3560
3561req_irq_failed:
3562 do {
3563 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3564 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3565reg_crq_failed:
3566 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3567map_failed:
3568 free_page((unsigned long)crq->msgs);
3569 return retrc;
3570}
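
Once registered, the page behaves as a ring of 16-byte entries whose ownership is signalled by the top bit of the valid byte. A hedged sketch of the usual VIO CRQ consumption pattern — helper name hypothetical; the driver's actual version appears earlier in this file:

static struct ibmvfc_crq *ibmvfc_next_crq_sketch(struct ibmvfc_host *vhost)
{
	struct ibmvfc_crq_queue *queue = &vhost->crq;
	struct ibmvfc_crq *crq = &queue->msgs[queue->cur];

	if (crq->valid & 0x80) {
		/* entry is valid: advance and wrap around the ring */
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else
		crq = NULL;

	return crq;
}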
3571
3572/**
3573 * ibmvfc_free_mem - Free memory for vhost
3574 * @vhost: ibmvfc host struct
3575 *
3576 * Return value:
3577 * none
3578 **/
3579static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
3580{
3581 struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
3582
3583 ENTER;
3584 mempool_destroy(vhost->tgt_pool);
3585 kfree(vhost->trace);
3586 dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
3587 vhost->disc_buf_dma);
3588 dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
3589 vhost->login_buf, vhost->login_buf_dma);
3590 dma_pool_destroy(vhost->sg_pool);
3591 dma_unmap_single(vhost->dev, async_q->msg_token,
3592 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
3593 free_page((unsigned long)async_q->msgs);
3594 LEAVE;
3595}
3596
3597/**
3598 * ibmvfc_alloc_mem - Allocate memory for vhost
3599 * @vhost: ibmvfc host struct
3600 *
3601 * Return value:
3602 * 0 on success / non-zero on failure
3603 **/
3604static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
3605{
3606 struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
3607 struct device *dev = vhost->dev;
3608
3609 ENTER;
3610 async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
3611 if (!async_q->msgs) {
3612 dev_err(dev, "Couldn't allocate async queue.\n");
3613 goto nomem;
3614 }
3615
3616 async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
3617 async_q->msg_token = dma_map_single(dev, async_q->msgs,
3618 async_q->size * sizeof(*async_q->msgs),
3619 DMA_BIDIRECTIONAL);
3620
3621 if (dma_mapping_error(async_q->msg_token)) {
3622 dev_err(dev, "Failed to map async queue\n");
3623 goto free_async_crq;
3624 }
3625
3626 vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
3627 SG_ALL * sizeof(struct srp_direct_buf),
3628 sizeof(struct srp_direct_buf), 0);
3629
3630 if (!vhost->sg_pool) {
3631 dev_err(dev, "Failed to allocate sg pool\n");
3632 goto unmap_async_crq;
3633 }
3634
3635 vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
3636 &vhost->login_buf_dma, GFP_KERNEL);
3637
3638 if (!vhost->login_buf) {
3639 dev_err(dev, "Couldn't allocate NPIV login buffer\n");
3640 goto free_sg_pool;
3641 }
3642
3643 vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
3644 vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
3645 &vhost->disc_buf_dma, GFP_KERNEL);
3646
3647 if (!vhost->disc_buf) {
3648 dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
3649 goto free_login_buffer;
3650 }
3651
3652 vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
3653 sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
3654
3655 if (!vhost->trace)
3656 goto free_disc_buffer;
3657
3658 vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
3659 sizeof(struct ibmvfc_target));
3660
3661 if (!vhost->tgt_pool) {
3662 dev_err(dev, "Couldn't allocate target memory pool\n");
3663 goto free_trace;
3664 }
3665
3666 LEAVE;
3667 return 0;
3668
3669free_trace:
3670 kfree(vhost->trace);
3671free_disc_buffer:
3672 dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
3673 vhost->disc_buf_dma);
3674free_login_buffer:
3675 dma_free_coherent(dev, sizeof(*vhost->login_buf),
3676 vhost->login_buf, vhost->login_buf_dma);
3677free_sg_pool:
3678 dma_pool_destroy(vhost->sg_pool);
3679unmap_async_crq:
3680 dma_unmap_single(dev, async_q->msg_token,
3681 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
3682free_async_crq:
3683 free_page((unsigned long)async_q->msgs);
3684nomem:
3685 LEAVE;
3686 return -ENOMEM;
3687}
3688
3689/**
3690 * ibmvfc_probe - Adapter hot plug add entry point
3691 * @vdev: vio device struct
3692 * @id: vio device id struct
3693 *
3694 * Return value:
3695 * 0 on success / non-zero on failure
3696 **/
3697static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
3698{
3699 struct ibmvfc_host *vhost;
3700 struct Scsi_Host *shost;
3701 struct device *dev = &vdev->dev;
3702 int rc = -ENOMEM;
3703
3704 ENTER;
3705 shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
3706 if (!shost) {
3707 dev_err(dev, "Couldn't allocate host data\n");
3708 goto out;
3709 }
3710
3711 shost->transportt = ibmvfc_transport_template;
3712 shost->can_queue = max_requests;
3713 shost->max_lun = max_lun;
3714 shost->max_id = max_targets;
3715 shost->max_sectors = IBMVFC_MAX_SECTORS;
3716 shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
3717 shost->unique_id = shost->host_no;
3718
3719 vhost = shost_priv(shost);
3720 INIT_LIST_HEAD(&vhost->sent);
3721 INIT_LIST_HEAD(&vhost->free);
3722 INIT_LIST_HEAD(&vhost->targets);
3723 sprintf(vhost->name, IBMVFC_NAME);
3724 vhost->host = shost;
3725 vhost->dev = dev;
3726 vhost->partition_number = -1;
3727 vhost->log_level = log_level;
3728 strcpy(vhost->partition_name, "UNKNOWN");
3729 init_waitqueue_head(&vhost->work_wait_q);
3730 init_waitqueue_head(&vhost->init_wait_q);
3731
3732 if ((rc = ibmvfc_alloc_mem(vhost)))
3733 goto free_scsi_host;
3734
3735 vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
3736 shost->host_no);
3737
3738 if (IS_ERR(vhost->work_thread)) {
3739 dev_err(dev, "Couldn't create kernel thread: %ld\n",
3740 PTR_ERR(vhost->work_thread));
3741 goto free_host_mem;
3742 }
3743
3744 if ((rc = ibmvfc_init_crq(vhost))) {
3745 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
3746 goto kill_kthread;
3747 }
3748
3749 if ((rc = ibmvfc_init_event_pool(vhost))) {
3750 dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
3751 goto release_crq;
3752 }
3753
3754 if ((rc = scsi_add_host(shost, dev)))
3755 goto release_event_pool;
3756
3757 if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
3758 &ibmvfc_trace_attr))) {
3759 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
3760 goto remove_shost;
3761 }
3762
3763 dev_set_drvdata(dev, vhost);
3764 spin_lock(&ibmvfc_driver_lock);
3765 list_add_tail(&vhost->queue, &ibmvfc_head);
3766 spin_unlock(&ibmvfc_driver_lock);
3767
3768 ibmvfc_send_crq_init(vhost);
3769 scsi_scan_host(shost);
3770 return 0;
3771
3772remove_shost:
3773 scsi_remove_host(shost);
3774release_event_pool:
3775 ibmvfc_free_event_pool(vhost);
3776release_crq:
3777 ibmvfc_release_crq_queue(vhost);
3778kill_kthread:
3779 kthread_stop(vhost->work_thread);
3780free_host_mem:
3781 ibmvfc_free_mem(vhost);
3782free_scsi_host:
3783 scsi_host_put(shost);
3784out:
3785 LEAVE;
3786 return rc;
3787}
3788
3789/**
3790 * ibmvfc_remove - Adapter hot plug remove entry point
3791 * @vdev: vio device struct
3792 *
3793 * Return value:
3794 * 0
3795 **/
3796static int ibmvfc_remove(struct vio_dev *vdev)
3797{
3798 struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
3799 unsigned long flags;
3800
3801 ENTER;
3802 ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
3803 kthread_stop(vhost->work_thread);
3804 fc_remove_host(vhost->host);
3805 scsi_remove_host(vhost->host);
3806 ibmvfc_release_crq_queue(vhost);
3807
3808 spin_lock_irqsave(vhost->host->host_lock, flags);
3809 ibmvfc_purge_requests(vhost, DID_ERROR);
3810 ibmvfc_free_event_pool(vhost);
3811 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3812
3813 ibmvfc_free_mem(vhost);
3814 spin_lock(&ibmvfc_driver_lock);
3815 list_del(&vhost->queue);
3816 spin_unlock(&ibmvfc_driver_lock);
3817 scsi_host_put(vhost->host);
3818 LEAVE;
3819 return 0;
3820}
3821
3822static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
3823 {"fcp", "IBM,vfc-client"},
3824 { "", "" }
3825};
3826MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
3827
3828static struct vio_driver ibmvfc_driver = {
3829 .id_table = ibmvfc_device_table,
3830 .probe = ibmvfc_probe,
3831 .remove = ibmvfc_remove,
3832 .driver = {
3833 .name = IBMVFC_NAME,
3834 .owner = THIS_MODULE,
3835 }
3836};
3837
3838static struct fc_function_template ibmvfc_transport_functions = {
3839 .show_host_fabric_name = 1,
3840 .show_host_node_name = 1,
3841 .show_host_port_name = 1,
3842 .show_host_supported_classes = 1,
3843 .show_host_port_type = 1,
3844 .show_host_port_id = 1,
3845
3846 .get_host_port_state = ibmvfc_get_host_port_state,
3847 .show_host_port_state = 1,
3848
3849 .get_host_speed = ibmvfc_get_host_speed,
3850 .show_host_speed = 1,
3851
3852 .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
3853 .terminate_rport_io = ibmvfc_terminate_rport_io,
3854
3855 .show_rport_maxframe_size = 1,
3856 .show_rport_supported_classes = 1,
3857
3858 .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
3859 .show_rport_dev_loss_tmo = 1,
3860
3861 .get_starget_node_name = ibmvfc_get_starget_node_name,
3862 .show_starget_node_name = 1,
3863
3864 .get_starget_port_name = ibmvfc_get_starget_port_name,
3865 .show_starget_port_name = 1,
3866
3867 .get_starget_port_id = ibmvfc_get_starget_port_id,
3868 .show_starget_port_id = 1,
3869};
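
Of the handlers wired up above, the dev_loss_tmo setter is typically the simplest; a hedged sketch of what such a callback usually does (the body is an assumption, not taken from this patch):

static void ibmvfc_set_rport_dev_loss_tmo_sketch(struct fc_rport *rport,
						 u32 timeout)
{
	/* a timeout of zero would disable dev_loss handling; clamp to 1s */
	rport->dev_loss_tmo = timeout ? timeout : 1;
}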
3870
3871/**
3872 * ibmvfc_module_init - Initialize the ibmvfc module
3873 *
3874 * Return value:
3875 * 0 on success / other on failure
3876 **/
3877static int __init ibmvfc_module_init(void)
3878{
3879 int rc;
3880
3881 if (!firmware_has_feature(FW_FEATURE_VIO))
3882 return -ENODEV;
3883
3884 printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
3885 IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
3886
3887 ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
3888 if (!ibmvfc_transport_template)
3889 return -ENOMEM;
3890
3891 rc = vio_register_driver(&ibmvfc_driver);
3892 if (rc)
3893 fc_release_transport(ibmvfc_transport_template);
3894 return rc;
3895}
3896
3897/**
3898 * ibmvfc_module_exit - Teardown the ibmvfc module
3899 *
3900 * Return value:
3901 * nothing
3902 **/
3903static void __exit ibmvfc_module_exit(void)
3904{
3905 vio_unregister_driver(&ibmvfc_driver);
3906 fc_release_transport(ibmvfc_transport_template);
3907}
3908
3909module_init(ibmvfc_module_init);
3910module_exit(ibmvfc_module_exit);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
new file mode 100644
index 000000000000..057f3c01ed61
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -0,0 +1,682 @@
1/*
2 * ibmvfc.h -- driver for IBM Power Virtual Fibre Channel Adapter
3 *
4 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) IBM Corporation, 2008
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#ifndef _IBMVFC_H
25#define _IBMVFC_H
26
27#include <linux/list.h>
28#include <linux/types.h>
29#include "viosrp.h"
30
31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.0"
33#define IBMVFC_DRIVER_DATE "(July 1, 2008)"
34
35#define IBMVFC_DEFAULT_TIMEOUT 15
36#define IBMVFC_INIT_TIMEOUT 30
37#define IBMVFC_MAX_REQUESTS_DEFAULT 100
38
39#define IBMVFC_DEBUG 0
40#define IBMVFC_MAX_TARGETS 1024
41#define IBMVFC_MAX_LUN 0xffffffff
42#define IBMVFC_MAX_SECTORS 0xffffu
43#define IBMVFC_MAX_DISC_THREADS 4
44#define IBMVFC_TGT_MEMPOOL_SZ 64
45#define IBMVFC_MAX_CMDS_PER_LUN 64
46#define IBMVFC_MAX_INIT_RETRIES 3
47#define IBMVFC_DEV_LOSS_TMO (5 * 60)
48#define IBMVFC_DEFAULT_LOG_LEVEL 2
49#define IBMVFC_MAX_CDB_LEN 16
50
51/*
52 * Ensure we have resources for ERP and initialization:
53 * 1 for ERP
54 * 1 for initialization
55 * 1 for each discovery thread
56 */
57#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + disc_threads)
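
A worked example, assuming the disc_threads module parameter defaults to IBMVFC_MAX_DISC_THREADS:

/* hedged arithmetic: 1 (ERP) + 1 (init) + 4 (discovery threads) == 6
 * events reserved on top of the normal max_requests queue depth */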
58
59#define IBMVFC_MAD_SUCCESS 0x00
60#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
61#define IBMVFC_MAD_FAILED 0xF7
62#define IBMVFC_MAD_DRIVER_FAILED 0xEE
63#define IBMVFC_MAD_CRQ_ERROR 0xEF
64
65enum ibmvfc_crq_valid {
66 IBMVFC_CRQ_CMD_RSP = 0x80,
67 IBMVFC_CRQ_INIT_RSP = 0xC0,
68 IBMVFC_CRQ_XPORT_EVENT = 0xFF,
69};
70
71enum ibmvfc_crq_format {
72 IBMVFC_CRQ_INIT = 0x01,
73 IBMVFC_CRQ_INIT_COMPLETE = 0x02,
74 IBMVFC_PARTITION_MIGRATED = 0x06,
75};
76
77enum ibmvfc_cmd_status_flags {
78 IBMVFC_FABRIC_MAPPED = 0x0001,
79 IBMVFC_VIOS_FAILURE = 0x0002,
80 IBMVFC_FC_FAILURE = 0x0004,
81 IBMVFC_FC_SCSI_ERROR = 0x0008,
82 IBMVFC_HW_EVENT_LOGGED = 0x0010,
83 IBMVFC_VIOS_LOGGED = 0x0020,
84};
85
86enum ibmvfc_fabric_mapped_errors {
87 IBMVFC_UNABLE_TO_ESTABLISH = 0x0001,
88 IBMVFC_XPORT_FAULT = 0x0002,
89 IBMVFC_CMD_TIMEOUT = 0x0003,
90 IBMVFC_ENETDOWN = 0x0004,
91 IBMVFC_HW_FAILURE = 0x0005,
92 IBMVFC_LINK_DOWN_ERR = 0x0006,
93 IBMVFC_LINK_DEAD_ERR = 0x0007,
94 IBMVFC_UNABLE_TO_REGISTER = 0x0008,
95 IBMVFC_XPORT_BUSY = 0x000A,
96 IBMVFC_XPORT_DEAD = 0x000B,
97 IBMVFC_CONFIG_ERROR = 0x000C,
98 IBMVFC_NAME_SERVER_FAIL = 0x000D,
99 IBMVFC_LINK_HALTED = 0x000E,
100 IBMVFC_XPORT_GENERAL = 0x8000,
101};
102
103enum ibmvfc_vios_errors {
104 IBMVFC_CRQ_FAILURE = 0x0001,
105 IBMVFC_SW_FAILURE = 0x0002,
106 IBMVFC_INVALID_PARAMETER = 0x0003,
107 IBMVFC_MISSING_PARAMETER = 0x0004,
108 IBMVFC_HOST_IO_BUS = 0x0005,
109 IBMVFC_TRANS_CANCELLED = 0x0006,
110 IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007,
111 IBMVFC_INSUFFICIENT_RESOURCE = 0x0008,
112 IBMVFC_COMMAND_FAILED = 0x8000,
113};
114
115enum ibmvfc_mad_types {
116 IBMVFC_NPIV_LOGIN = 0x0001,
117 IBMVFC_DISC_TARGETS = 0x0002,
118 IBMVFC_PORT_LOGIN = 0x0004,
119 IBMVFC_PROCESS_LOGIN = 0x0008,
120 IBMVFC_QUERY_TARGET = 0x0010,
121 IBMVFC_IMPLICIT_LOGOUT = 0x0040,
122 IBMVFC_TMF_MAD = 0x0100,
123};
124
125struct ibmvfc_mad_common {
126 u32 version;
127 u32 reserved;
128 u32 opcode;
129 u16 status;
130 u16 length;
131 u64 tag;
132}__attribute__((packed, aligned (8)));
133
134struct ibmvfc_npiv_login_mad {
135 struct ibmvfc_mad_common common;
136 struct srp_direct_buf buffer;
137}__attribute__((packed, aligned (8)));
138
139#define IBMVFC_MAX_NAME 256
140
141struct ibmvfc_npiv_login {
142 u32 ostype;
143#define IBMVFC_OS_LINUX 0x02
144 u32 pad;
145 u64 max_dma_len;
146 u32 max_payload;
147 u32 max_response;
148 u32 partition_num;
149 u32 vfc_frame_version;
150 u16 fcp_version;
151 u16 flags;
152#define IBMVFC_CLIENT_MIGRATED 0x01
153#define IBMVFC_FLUSH_ON_HALT 0x02
154 u32 max_cmds;
155 u64 capabilities;
156#define IBMVFC_CAN_MIGRATE 0x01
157 u64 node_name;
158 struct srp_direct_buf async;
159 u8 partition_name[IBMVFC_MAX_NAME];
160 u8 device_name[IBMVFC_MAX_NAME];
161 u8 drc_name[IBMVFC_MAX_NAME];
162 u64 reserved2[2];
163}__attribute__((packed, aligned (8)));
164
165struct ibmvfc_common_svc_parms {
166 u16 fcph_version;
167 u16 b2b_credit;
168 u16 features;
169 u16 bb_rcv_sz; /* upper nibble is BB_SC_N */
170 u32 ratov;
171 u32 edtov;
172}__attribute__((packed, aligned (4)));
173
174struct ibmvfc_service_parms {
175 struct ibmvfc_common_svc_parms common;
176 u8 port_name[8];
177 u8 node_name[8];
178 u32 class1_parms[4];
179 u32 class2_parms[4];
180 u32 class3_parms[4];
181 u32 obsolete[4];
182 u32 vendor_version[4];
183 u32 services_avail[2];
184 u32 ext_len;
185 u32 reserved[30];
186 u32 clk_sync_qos[2];
187}__attribute__((packed, aligned (4)));
188
189struct ibmvfc_npiv_login_resp {
190 u32 version;
191 u16 status;
192 u16 error;
193 u32 flags;
194#define IBMVFC_NATIVE_FC 0x01
195#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
196 u32 reserved;
197 u64 capabilites;
198 u32 max_cmds;
199 u32 scsi_id_sz;
200 u64 max_dma_len;
201 u64 scsi_id;
202 u64 port_name;
203 u64 node_name;
204 u64 link_speed;
205 u8 partition_name[IBMVFC_MAX_NAME];
206 u8 device_name[IBMVFC_MAX_NAME];
207 u8 port_loc_code[IBMVFC_MAX_NAME];
208 u8 drc_name[IBMVFC_MAX_NAME];
209 struct ibmvfc_service_parms service_parms;
210 u64 reserved2;
211}__attribute__((packed, aligned (8)));
212
213union ibmvfc_npiv_login_data {
214 struct ibmvfc_npiv_login login;
215 struct ibmvfc_npiv_login_resp resp;
216}__attribute__((packed, aligned (8)));
217
218struct ibmvfc_discover_targets_buf {
219 u32 scsi_id[1];
220#define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff
221};
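
A hedged sketch of how this response buffer is presumably walked — helper name hypothetical; num_written comes from the ibmvfc_discover_targets MAD defined next:

static void ibmvfc_walk_disc_buf_sketch(struct ibmvfc_discover_targets_buf *buf,
					u32 num_written)
{
	u32 i, scsi_id;

	for (i = 0; i < num_written; i++) {
		/* the low 24 bits carry the SCSI ID; upper bits are masked off */
		scsi_id = buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK;
		/* allocate or revalidate an ibmvfc_target for scsi_id here */
	}
}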
222
223struct ibmvfc_discover_targets {
224 struct ibmvfc_mad_common common;
225 struct srp_direct_buf buffer;
226 u32 flags;
227 u16 status;
228 u16 error;
229 u32 bufflen;
230 u32 num_avail;
231 u32 num_written;
232 u64 reserved[2];
233}__attribute__((packed, aligned (8)));
234
235enum ibmvfc_fc_reason {
236 IBMVFC_INVALID_ELS_CMD_CODE = 0x01,
237 IBMVFC_INVALID_VERSION = 0x02,
238 IBMVFC_LOGICAL_ERROR = 0x03,
239 IBMVFC_INVALID_CT_IU_SIZE = 0x04,
240 IBMVFC_LOGICAL_BUSY = 0x05,
241 IBMVFC_PROTOCOL_ERROR = 0x07,
242 IBMVFC_UNABLE_TO_PERFORM_REQ = 0x09,
243 IBMVFC_CMD_NOT_SUPPORTED = 0x0B,
244 IBMVFC_SERVER_NOT_AVAIL = 0x0D,
245 IBMVFC_CMD_IN_PROGRESS = 0x0E,
246 IBMVFC_VENDOR_SPECIFIC = 0xFF,
247};
248
249enum ibmvfc_fc_type {
250 IBMVFC_FABRIC_REJECT = 0x01,
251 IBMVFC_PORT_REJECT = 0x02,
252 IBMVFC_LS_REJECT = 0x03,
253 IBMVFC_FABRIC_BUSY = 0x04,
254 IBMVFC_PORT_BUSY = 0x05,
255 IBMVFC_BASIC_REJECT = 0x06,
256};
257
258enum ibmvfc_gs_explain {
259 IBMVFC_PORT_NAME_NOT_REG = 0x02,
260};
261
262struct ibmvfc_port_login {
263 struct ibmvfc_mad_common common;
264 u64 scsi_id;
265 u16 reserved;
266 u16 fc_service_class;
267 u32 blksz;
268 u32 hdr_per_blk;
269 u16 status;
270 u16 error; /* also fc_reason */
271 u16 fc_explain;
272 u16 fc_type;
273 u32 reserved2;
274 struct ibmvfc_service_parms service_parms;
275 struct ibmvfc_service_parms service_parms_change;
276 u64 reserved3[2];
277}__attribute__((packed, aligned (8)));
278
279struct ibmvfc_prli_svc_parms {
280 u8 type;
281#define IBMVFC_SCSI_FCP_TYPE 0x08
282 u8 type_ext;
283 u16 flags;
284#define IBMVFC_PRLI_ORIG_PA_VALID 0x8000
285#define IBMVFC_PRLI_RESP_PA_VALID 0x4000
286#define IBMVFC_PRLI_EST_IMG_PAIR 0x2000
287 u32 orig_pa;
288 u32 resp_pa;
289 u32 service_parms;
290#define IBMVFC_PRLI_TASK_RETRY 0x00000200
291#define IBMVFC_PRLI_RETRY 0x00000100
292#define IBMVFC_PRLI_DATA_OVERLAY 0x00000040
293#define IBMVFC_PRLI_INITIATOR_FUNC 0x00000020
294#define IBMVFC_PRLI_TARGET_FUNC 0x00000010
295#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002
296#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED 0x00000001
297}__attribute__((packed, aligned (4)));
298
299struct ibmvfc_process_login {
300 struct ibmvfc_mad_common common;
301 u64 scsi_id;
302 struct ibmvfc_prli_svc_parms parms;
303 u8 reserved[48];
304 u16 status;
305 u16 error; /* also fc_reason */
306 u32 reserved2;
307 u64 reserved3[2];
308}__attribute__((packed, aligned (8)));
309
310struct ibmvfc_query_tgt {
311 struct ibmvfc_mad_common common;
312 u64 wwpn;
313 u64 scsi_id;
314 u16 status;
315 u16 error;
316 u16 fc_explain;
317 u16 fc_type;
318 u64 reserved[2];
319}__attribute__((packed, aligned (8)));
320
321struct ibmvfc_implicit_logout {
322 struct ibmvfc_mad_common common;
323 u64 old_scsi_id;
324 u64 reserved[2];
325}__attribute__((packed, aligned (8)));
326
327struct ibmvfc_tmf {
328 struct ibmvfc_mad_common common;
329 u64 scsi_id;
330 struct scsi_lun lun;
331 u32 flags;
332#define IBMVFC_TMF_ABORT_TASK 0x02
333#define IBMVFC_TMF_ABORT_TASK_SET 0x04
334#define IBMVFC_TMF_LUN_RESET 0x10
335#define IBMVFC_TMF_TGT_RESET 0x20
336#define IBMVFC_TMF_LUA_VALID 0x40
337 u32 cancel_key;
338 u32 my_cancel_key;
339#define IBMVFC_TMF_CANCEL_KEY 0x80000000
340 u32 pad;
341 u64 reserved[2];
342}__attribute__((packed, aligned (8)));
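
To illustrate the flag bits, a hedged sketch of building a LUN-reset TMF. Field choices are inferred from the layout above, not taken from the driver's actual TMF path, and the MAD version is an assumption:

static void ibmvfc_fill_lun_reset_sketch(struct ibmvfc_tmf *tmf, u64 scsi_id,
					 struct scsi_lun *lun)
{
	memset(tmf, 0, sizeof(*tmf));
	tmf->common.version = 1;		/* assumed MAD version */
	tmf->common.opcode = IBMVFC_TMF_MAD;
	tmf->common.length = sizeof(*tmf);
	tmf->scsi_id = scsi_id;
	memcpy(&tmf->lun, lun, sizeof(tmf->lun));
	tmf->flags = IBMVFC_TMF_LUN_RESET | IBMVFC_TMF_LUA_VALID;
	tmf->cancel_key = IBMVFC_TMF_CANCEL_KEY;
}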
343
344enum ibmvfc_fcp_rsp_info_codes {
345 RSP_NO_FAILURE = 0x00,
346 RSP_TMF_REJECTED = 0x04,
347 RSP_TMF_FAILED = 0x05,
348 RSP_TMF_INVALID_LUN = 0x09,
349};
350
351struct ibmvfc_fcp_rsp_info {
352 u16 reserved;
353 u8 rsp_code;
354 u8 reserved2[4];
355}__attribute__((packed, aligned (2)));
356
357enum ibmvfc_fcp_rsp_flags {
358 FCP_BIDI_RSP = 0x80,
359 FCP_BIDI_READ_RESID_UNDER = 0x40,
360 FCP_BIDI_READ_RESID_OVER = 0x20,
361 FCP_CONF_REQ = 0x10,
362 FCP_RESID_UNDER = 0x08,
363 FCP_RESID_OVER = 0x04,
364 FCP_SNS_LEN_VALID = 0x02,
365 FCP_RSP_LEN_VALID = 0x01,
366};
367
368union ibmvfc_fcp_rsp_data {
369 struct ibmvfc_fcp_rsp_info info;
370 u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
371}__attribute__((packed, aligned (8)));
372
373struct ibmvfc_fcp_rsp {
374 u64 reserved;
375 u16 retry_delay_timer;
376 u8 flags;
377 u8 scsi_status;
378 u32 fcp_resid;
379 u32 fcp_sense_len;
380 u32 fcp_rsp_len;
381 union ibmvfc_fcp_rsp_data data;
382}__attribute__((packed, aligned (8)));
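
Locating sense data in the response depends on the flag bits defined above; a hedged sketch, assuming a big-endian host so the length fields are used without byte swapping:

static const u8 *ibmvfc_sense_buf_sketch(const struct ibmvfc_fcp_rsp *rsp)
{
	const u8 *sense = rsp->data.sense;

	if (!(rsp->flags & FCP_SNS_LEN_VALID))
		return NULL;
	/* response-info bytes, if present, precede the sense bytes */
	if (rsp->flags & FCP_RSP_LEN_VALID)
		sense += rsp->fcp_rsp_len;
	return sense;
}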
383
384enum ibmvfc_cmd_flags {
385 IBMVFC_SCATTERLIST = 0x0001,
386 IBMVFC_NO_MEM_DESC = 0x0002,
387 IBMVFC_READ = 0x0004,
388 IBMVFC_WRITE = 0x0008,
389 IBMVFC_TMF = 0x0080,
390 IBMVFC_CLASS_3_ERR = 0x0100,
391};
392
393enum ibmvfc_fc_task_attr {
394 IBMVFC_SIMPLE_TASK = 0x00,
395 IBMVFC_HEAD_OF_QUEUE = 0x01,
396 IBMVFC_ORDERED_TASK = 0x02,
397 IBMVFC_ACA_TASK = 0x04,
398};
399
400enum ibmvfc_fc_tmf_flags {
401 IBMVFC_ABORT_TASK_SET = 0x02,
402 IBMVFC_LUN_RESET = 0x10,
403 IBMVFC_TARGET_RESET = 0x20,
404};
405
406struct ibmvfc_fcp_cmd_iu {
407 struct scsi_lun lun;
408 u8 crn;
409 u8 pri_task_attr;
410 u8 tmf_flags;
411 u8 add_cdb_len;
412#define IBMVFC_RDDATA 0x02
413#define IBMVFC_WRDATA 0x01
414 u8 cdb[IBMVFC_MAX_CDB_LEN];
415 u32 xfer_len;
416}__attribute__((packed, aligned (4)));
417
418struct ibmvfc_cmd {
419 u64 task_tag;
420 u32 frame_type;
421 u32 payload_len;
422 u32 resp_len;
423 u32 adapter_resid;
424 u16 status;
425 u16 error;
426 u16 flags;
427 u16 response_flags;
428#define IBMVFC_ADAPTER_RESID_VALID 0x01
429 u32 cancel_key;
430 u32 exchange_id;
431 struct srp_direct_buf ext_func;
432 struct srp_direct_buf ioba;
433 struct srp_direct_buf resp;
434 u64 correlation;
435 u64 tgt_scsi_id;
436 u64 tag;
437 u64 reserved3[2];
438 struct ibmvfc_fcp_cmd_iu iu;
439 struct ibmvfc_fcp_rsp rsp;
440}__attribute__((packed, aligned (8)));
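
A hedged sketch of translating a struct scsi_cmnd into this frame. The field usage mirrors the layout and common FCP practice, not necessarily the driver's queuecommand path:

static void ibmvfc_init_cmd_sketch(struct ibmvfc_cmd *vfc_cmd,
				   struct scsi_cmnd *cmnd, u64 tgt_scsi_id)
{
	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
	vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
	vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;	/* assumed frame type */
	vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
	vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
	vfc_cmd->tgt_scsi_id = tgt_scsi_id;
	vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
	int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
	memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
}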
441
442struct ibmvfc_trace_start_entry {
443 u32 xfer_len;
444}__attribute__((packed));
445
446struct ibmvfc_trace_end_entry {
447 u16 status;
448 u16 error;
449 u8 fcp_rsp_flags;
450 u8 rsp_code;
451 u8 scsi_status;
452 u8 reserved;
453}__attribute__((packed));
454
455struct ibmvfc_trace_entry {
456 struct ibmvfc_event *evt;
457 u32 time;
458 u32 scsi_id;
459 u32 lun;
460 u8 fmt;
461 u8 op_code;
462 u8 tmf_flags;
463 u8 type;
464#define IBMVFC_TRC_START 0x00
465#define IBMVFC_TRC_END 0xff
466 union {
467 struct ibmvfc_trace_start_entry start;
468 struct ibmvfc_trace_end_entry end;
469 } u;
470}__attribute__((packed, aligned (8)));
471
472enum ibmvfc_crq_formats {
473 IBMVFC_CMD_FORMAT = 0x01,
474 IBMVFC_ASYNC_EVENT = 0x02,
475 IBMVFC_MAD_FORMAT = 0x04,
476};
477
478enum ibmvfc_async_event {
479 IBMVFC_AE_ELS_PLOGI = 0x0001,
480 IBMVFC_AE_ELS_LOGO = 0x0002,
481 IBMVFC_AE_ELS_PRLO = 0x0004,
482 IBMVFC_AE_SCN_NPORT = 0x0008,
483 IBMVFC_AE_SCN_GROUP = 0x0010,
484 IBMVFC_AE_SCN_DOMAIN = 0x0020,
485 IBMVFC_AE_SCN_FABRIC = 0x0040,
486 IBMVFC_AE_LINK_UP = 0x0080,
487 IBMVFC_AE_LINK_DOWN = 0x0100,
488 IBMVFC_AE_LINK_DEAD = 0x0200,
489 IBMVFC_AE_HALT = 0x0400,
490 IBMVFC_AE_RESUME = 0x0800,
491 IBMVFC_AE_ADAPTER_FAILED = 0x1000,
492};
493
494struct ibmvfc_crq {
495 u8 valid;
496 u8 format;
497 u8 reserved[6];
498 u64 ioba;
499}__attribute__((packed, aligned (8)));
500
501struct ibmvfc_crq_queue {
502 struct ibmvfc_crq *msgs;
503 int size, cur;
504 dma_addr_t msg_token;
505};
506
507struct ibmvfc_async_crq {
508 u8 valid;
509 u8 pad[3];
510 u32 pad2;
511 u64 event;
512 u64 scsi_id;
513 u64 wwpn;
514 u64 node_name;
515 u64 reserved;
516}__attribute__((packed, aligned (8)));
517
518struct ibmvfc_async_crq_queue {
519 struct ibmvfc_async_crq *msgs;
520 int size, cur;
521 dma_addr_t msg_token;
522};
523
524union ibmvfc_iu {
525 struct ibmvfc_mad_common mad_common;
526 struct ibmvfc_npiv_login_mad npiv_login;
527 struct ibmvfc_discover_targets discover_targets;
528 struct ibmvfc_port_login plogi;
529 struct ibmvfc_process_login prli;
530 struct ibmvfc_query_tgt query_tgt;
531 struct ibmvfc_implicit_logout implicit_logout;
532 struct ibmvfc_tmf tmf;
533 struct ibmvfc_cmd cmd;
534}__attribute__((packed, aligned (8)));
535
536enum ibmvfc_target_action {
537 IBMVFC_TGT_ACTION_NONE = 0,
538 IBMVFC_TGT_ACTION_INIT,
539 IBMVFC_TGT_ACTION_INIT_WAIT,
540 IBMVFC_TGT_ACTION_ADD_RPORT,
541 IBMVFC_TGT_ACTION_DEL_RPORT,
542};
543
544struct ibmvfc_target {
545 struct list_head queue;
546 struct ibmvfc_host *vhost;
547 u64 scsi_id;
548 u64 new_scsi_id;
549 struct fc_rport *rport;
550 int target_id;
551 enum ibmvfc_target_action action;
552 int need_login;
553 int init_retries;
554 struct ibmvfc_service_parms service_parms;
555 struct ibmvfc_service_parms service_parms_change;
556 struct fc_rport_identifiers ids;
557 void (*job_step) (struct ibmvfc_target *);
558 struct kref kref;
559};
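
The kref in this structure pairs with the kref_put(&tgt->kref, ibmvfc_release_tgt) calls seen in ibmvfc.c; a hedged sketch of the release callback (returning the target to the tgt_pool mempool is an assumption):

static void ibmvfc_release_tgt_sketch(struct kref *kref)
{
	struct ibmvfc_target *tgt =
		container_of(kref, struct ibmvfc_target, kref);

	/* assumed: targets are allocated from the vhost->tgt_pool mempool */
	mempool_free(tgt, tgt->vhost->tgt_pool);
}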
560
561/* a unit of work for the hosting partition */
562struct ibmvfc_event {
563 struct list_head queue;
564 struct ibmvfc_host *vhost;
565 struct ibmvfc_target *tgt;
566 struct scsi_cmnd *cmnd;
567 atomic_t free;
568 union ibmvfc_iu *xfer_iu;
569 void (*done) (struct ibmvfc_event *);
570 struct ibmvfc_crq crq;
571 union ibmvfc_iu iu;
572 union ibmvfc_iu *sync_iu;
573 struct srp_direct_buf *ext_list;
574 dma_addr_t ext_list_token;
575 struct completion comp;
576 struct timer_list timer;
577};
578
579/* a pool of event structs for use */
580struct ibmvfc_event_pool {
581 struct ibmvfc_event *events;
582 u32 size;
583 union ibmvfc_iu *iu_storage;
584 dma_addr_t iu_token;
585};
586
587enum ibmvfc_host_action {
588 IBMVFC_HOST_ACTION_NONE = 0,
589 IBMVFC_HOST_ACTION_INIT,
590 IBMVFC_HOST_ACTION_INIT_WAIT,
591 IBMVFC_HOST_ACTION_QUERY,
592 IBMVFC_HOST_ACTION_QUERY_TGTS,
593 IBMVFC_HOST_ACTION_TGT_DEL,
594 IBMVFC_HOST_ACTION_ALLOC_TGTS,
595 IBMVFC_HOST_ACTION_TGT_INIT,
596 IBMVFC_HOST_ACTION_TGT_ADD,
597};
598
599enum ibmvfc_host_state {
600 IBMVFC_NO_CRQ = 0,
601 IBMVFC_INITIALIZING,
602 IBMVFC_ACTIVE,
603 IBMVFC_HALTED,
604 IBMVFC_LINK_DOWN,
605 IBMVFC_LINK_DEAD,
606 IBMVFC_HOST_OFFLINE,
607};
608
609struct ibmvfc_host {
610 char name[8];
611 struct list_head queue;
612 struct Scsi_Host *host;
613 enum ibmvfc_host_state state;
614 enum ibmvfc_host_action action;
615#define IBMVFC_NUM_TRACE_INDEX_BITS 8
616#define IBMVFC_NUM_TRACE_ENTRIES (1 << IBMVFC_NUM_TRACE_INDEX_BITS)
617#define IBMVFC_TRACE_SIZE (sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES)
618 struct ibmvfc_trace_entry *trace;
619 u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS;
620 int num_targets;
621 struct list_head targets;
622 struct list_head sent;
623 struct list_head free;
624 struct device *dev;
625 struct ibmvfc_event_pool pool;
626 struct dma_pool *sg_pool;
627 mempool_t *tgt_pool;
628 struct ibmvfc_crq_queue crq;
629 struct ibmvfc_async_crq_queue async_crq;
630 struct ibmvfc_npiv_login login_info;
631 union ibmvfc_npiv_login_data *login_buf;
632 dma_addr_t login_buf_dma;
633 int disc_buf_sz;
634 int log_level;
635 struct ibmvfc_discover_targets_buf *disc_buf;
636 int task_set;
637 int init_retries;
638 int discovery_threads;
639 int client_migrated;
640 int reinit;
641 int events_to_log;
642#define IBMVFC_AE_LINKUP 0x0001
643#define IBMVFC_AE_LINKDOWN 0x0002
644#define IBMVFC_AE_RSCN 0x0004
645 dma_addr_t disc_buf_dma;
646 unsigned int partition_number;
647 char partition_name[97];
648 void (*job_step) (struct ibmvfc_host *);
649 struct task_struct *work_thread;
650 wait_queue_head_t init_wait_q;
651 wait_queue_head_t work_wait_q;
652};
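
Note how trace_index is an IBMVFC_NUM_TRACE_INDEX_BITS-wide bitfield: post-incrementing it wraps modulo IBMVFC_NUM_TRACE_ENTRIES with no explicit mask, which makes vhost->trace a circular buffer. A hedged sketch with a hypothetical helper name:

static struct ibmvfc_trace_entry *ibmvfc_next_trace_sketch(struct ibmvfc_host *vhost)
{
	/* the 8-bit bitfield wraps 255 -> 0 automatically */
	return &vhost->trace[vhost->trace_index++];
}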
653
654#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0)
655
656#define tgt_dbg(t, fmt, ...) \
657 DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
658
659#define tgt_err(t, fmt, ...) \
660 dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
661
662#define ibmvfc_dbg(vhost, ...) \
663 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
664
665#define ibmvfc_log(vhost, level, ...) \
666 do { \
667 if (level >= (vhost)->log_level) \
668 dev_err((vhost)->dev, ##__VA_ARGS__); \
669 } while (0)
670
671#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__))
672#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__))
673
674#ifdef CONFIG_SCSI_IBMVFC_TRACE
675#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
676#define ibmvfc_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
677#else
678#define ibmvfc_create_trace_file(kobj, attr) 0
679#define ibmvfc_remove_trace_file(kobj, attr) do { } while (0)
680#endif
681
682#endif
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 72b9b2a0eba3..2a2f0094570f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -64,6 +64,10 @@ MODULE_LICENSE("GPL");
64#define BUG_ON(expr) 64#define BUG_ON(expr)
65#endif 65#endif
66 66
67static struct scsi_transport_template *iscsi_tcp_scsi_transport;
68static struct scsi_host_template iscsi_sht;
69static struct iscsi_transport iscsi_tcp_transport;
70
67static unsigned int iscsi_max_lun = 512; 71static unsigned int iscsi_max_lun = 512;
68module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); 72module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
69 73
@@ -494,39 +498,43 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
494 * must be called with session lock 498 * must be called with session lock
495 */ 499 */
496static void 500static void
497iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 501iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
498{ 502{
499 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 503 struct iscsi_tcp_task *tcp_task = task->dd_data;
500 struct iscsi_r2t_info *r2t; 504 struct iscsi_r2t_info *r2t;
501 505
502 /* flush ctask's r2t queues */ 506 /* nothing to do for mgmt tasks */
503 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { 507 if (!task->sc)
504 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 508 return;
509
510 /* flush task's r2t queues */
511 while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
512 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
505 sizeof(void*)); 513 sizeof(void*));
506 debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); 514 debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
507 } 515 }
508 516
509 r2t = tcp_ctask->r2t; 517 r2t = tcp_task->r2t;
510 if (r2t != NULL) { 518 if (r2t != NULL) {
511 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 519 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
512 sizeof(void*)); 520 sizeof(void*));
513 tcp_ctask->r2t = NULL; 521 tcp_task->r2t = NULL;
514 } 522 }
515} 523}
516 524
517/** 525/**
518 * iscsi_data_rsp - SCSI Data-In Response processing 526 * iscsi_data_rsp - SCSI Data-In Response processing
519 * @conn: iscsi connection 527 * @conn: iscsi connection
520 * @ctask: scsi command task 528 * @task: scsi command task
521 **/ 529 **/
522static int 530static int
523iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 531iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
524{ 532{
525 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 533 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
526 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 534 struct iscsi_tcp_task *tcp_task = task->dd_data;
527 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr; 535 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
528 struct iscsi_session *session = conn->session; 536 struct iscsi_session *session = conn->session;
529 struct scsi_cmnd *sc = ctask->sc; 537 struct scsi_cmnd *sc = task->sc;
530 int datasn = be32_to_cpu(rhdr->datasn); 538 int datasn = be32_to_cpu(rhdr->datasn);
531 unsigned total_in_length = scsi_in(sc)->length; 539 unsigned total_in_length = scsi_in(sc)->length;
532 540
@@ -534,18 +542,18 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
534 if (tcp_conn->in.datalen == 0) 542 if (tcp_conn->in.datalen == 0)
535 return 0; 543 return 0;
536 544
537 if (tcp_ctask->exp_datasn != datasn) { 545 if (tcp_task->exp_datasn != datasn) {
538 debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n", 546 debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
539 __FUNCTION__, tcp_ctask->exp_datasn, datasn); 547 __func__, tcp_task->exp_datasn, datasn);
540 return ISCSI_ERR_DATASN; 548 return ISCSI_ERR_DATASN;
541 } 549 }
542 550
543 tcp_ctask->exp_datasn++; 551 tcp_task->exp_datasn++;
544 552
545 tcp_ctask->data_offset = be32_to_cpu(rhdr->offset); 553 tcp_task->data_offset = be32_to_cpu(rhdr->offset);
546 if (tcp_ctask->data_offset + tcp_conn->in.datalen > total_in_length) { 554 if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
547 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n", 555 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
548 __FUNCTION__, tcp_ctask->data_offset, 556 __func__, tcp_task->data_offset,
549 tcp_conn->in.datalen, total_in_length); 557 tcp_conn->in.datalen, total_in_length);
550 return ISCSI_ERR_DATA_OFFSET; 558 return ISCSI_ERR_DATA_OFFSET;
551 } 559 }
@@ -574,7 +582,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
574/** 582/**
575 * iscsi_solicit_data_init - initialize first Data-Out 583 * iscsi_solicit_data_init - initialize first Data-Out
576 * @conn: iscsi connection 584 * @conn: iscsi connection
577 * @ctask: scsi command task 585 * @task: scsi command task
578 * @r2t: R2T info 586 * @r2t: R2T info
579 * 587 *
580 * Notes: 588 * Notes:
@@ -584,7 +592,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
584 * This function is called with connection lock taken. 592 * This function is called with connection lock taken.
585 **/ 593 **/
586static void 594static void
587iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 595iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
588 struct iscsi_r2t_info *r2t) 596 struct iscsi_r2t_info *r2t)
589{ 597{
590 struct iscsi_data *hdr; 598 struct iscsi_data *hdr;
@@ -595,8 +603,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
595 hdr->datasn = cpu_to_be32(r2t->solicit_datasn); 603 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
596 r2t->solicit_datasn++; 604 r2t->solicit_datasn++;
597 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 605 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
598 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); 606 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
599 hdr->itt = ctask->hdr->itt; 607 hdr->itt = task->hdr->itt;
600 hdr->exp_statsn = r2t->exp_statsn; 608 hdr->exp_statsn = r2t->exp_statsn;
601 hdr->offset = cpu_to_be32(r2t->data_offset); 609 hdr->offset = cpu_to_be32(r2t->data_offset);
602 if (r2t->data_length > conn->max_xmit_dlength) { 610 if (r2t->data_length > conn->max_xmit_dlength) {
@@ -616,14 +624,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
616/** 624/**
617 * iscsi_r2t_rsp - iSCSI R2T Response processing 625 * iscsi_r2t_rsp - iSCSI R2T Response processing
618 * @conn: iscsi connection 626 * @conn: iscsi connection
619 * @ctask: scsi command task 627 * @task: scsi command task
620 **/ 628 **/
621static int 629static int
622iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 630iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
623{ 631{
624 struct iscsi_r2t_info *r2t; 632 struct iscsi_r2t_info *r2t;
625 struct iscsi_session *session = conn->session; 633 struct iscsi_session *session = conn->session;
626 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 634 struct iscsi_tcp_task *tcp_task = task->dd_data;
627 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 635 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
628 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr; 636 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
629 int r2tsn = be32_to_cpu(rhdr->r2tsn); 637 int r2tsn = be32_to_cpu(rhdr->r2tsn);
@@ -636,23 +644,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
636 return ISCSI_ERR_DATALEN; 644 return ISCSI_ERR_DATALEN;
637 } 645 }
638 646
639 if (tcp_ctask->exp_datasn != r2tsn){ 647 if (tcp_task->exp_datasn != r2tsn){
640 debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n", 648 debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
641 __FUNCTION__, tcp_ctask->exp_datasn, r2tsn); 649 __func__, tcp_task->exp_datasn, r2tsn);
642 return ISCSI_ERR_R2TSN; 650 return ISCSI_ERR_R2TSN;
643 } 651 }
644 652
645 /* fill-in new R2T associated with the task */ 653 /* fill-in new R2T associated with the task */
646 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); 654 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
647 655
648 if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) { 656 if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
649 iscsi_conn_printk(KERN_INFO, conn, 657 iscsi_conn_printk(KERN_INFO, conn,
650 "dropping R2T itt %d in recovery.\n", 658 "dropping R2T itt %d in recovery.\n",
651 ctask->itt); 659 task->itt);
652 return 0; 660 return 0;
653 } 661 }
654 662
655 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); 663 rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
656 BUG_ON(!rc); 664 BUG_ON(!rc);
657 665
658 r2t->exp_statsn = rhdr->statsn; 666 r2t->exp_statsn = rhdr->statsn;
@@ -660,7 +668,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
660 if (r2t->data_length == 0) { 668 if (r2t->data_length == 0) {
661 iscsi_conn_printk(KERN_ERR, conn, 669 iscsi_conn_printk(KERN_ERR, conn,
662 "invalid R2T with zero data len\n"); 670 "invalid R2T with zero data len\n");
663 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 671 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
664 sizeof(void*)); 672 sizeof(void*));
665 return ISCSI_ERR_DATALEN; 673 return ISCSI_ERR_DATALEN;
666 } 674 }
@@ -671,12 +679,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
671 r2t->data_length, session->max_burst); 679 r2t->data_length, session->max_burst);
672 680
673 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 681 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
674 if (r2t->data_offset + r2t->data_length > scsi_out(ctask->sc)->length) { 682 if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
675 iscsi_conn_printk(KERN_ERR, conn, 683 iscsi_conn_printk(KERN_ERR, conn,
676 "invalid R2T with data len %u at offset %u " 684 "invalid R2T with data len %u at offset %u "
677 "and total length %d\n", r2t->data_length, 685 "and total length %d\n", r2t->data_length,
678 r2t->data_offset, scsi_out(ctask->sc)->length); 686 r2t->data_offset, scsi_out(task->sc)->length);
679 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 687 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
680 sizeof(void*)); 688 sizeof(void*));
681 return ISCSI_ERR_DATALEN; 689 return ISCSI_ERR_DATALEN;
682 } 690 }
@@ -684,13 +692,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
684 r2t->ttt = rhdr->ttt; /* no flip */ 692 r2t->ttt = rhdr->ttt; /* no flip */
685 r2t->solicit_datasn = 0; 693 r2t->solicit_datasn = 0;
686 694
687 iscsi_solicit_data_init(conn, ctask, r2t); 695 iscsi_solicit_data_init(conn, task, r2t);
688 696
689 tcp_ctask->exp_datasn = r2tsn + 1; 697 tcp_task->exp_datasn = r2tsn + 1;
690 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); 698 __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
691 conn->r2t_pdus_cnt++; 699 conn->r2t_pdus_cnt++;
692 700
693 iscsi_requeue_ctask(ctask); 701 iscsi_requeue_task(task);
694 return 0; 702 return 0;
695} 703}
696 704
@@ -733,10 +741,8 @@ static int
733iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 741iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
734{ 742{
735 int rc = 0, opcode, ahslen; 743 int rc = 0, opcode, ahslen;
736 struct iscsi_session *session = conn->session;
737 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 744 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
738 struct iscsi_cmd_task *ctask; 745 struct iscsi_task *task;
739 uint32_t itt;
740 746
741 /* verify PDU length */ 747 /* verify PDU length */
742 tcp_conn->in.datalen = ntoh24(hdr->dlength); 748 tcp_conn->in.datalen = ntoh24(hdr->dlength);
@@ -754,7 +760,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
754 760
755 opcode = hdr->opcode & ISCSI_OPCODE_MASK; 761 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
756 /* verify itt (itt encoding: age+cid+itt) */ 762 /* verify itt (itt encoding: age+cid+itt) */
757 rc = iscsi_verify_itt(conn, hdr, &itt); 763 rc = iscsi_verify_itt(conn, hdr->itt);
758 if (rc) 764 if (rc)
759 return rc; 765 return rc;
760 766
@@ -763,16 +769,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
763 769
764 switch(opcode) { 770 switch(opcode) {
765 case ISCSI_OP_SCSI_DATA_IN: 771 case ISCSI_OP_SCSI_DATA_IN:
766 ctask = session->cmds[itt];
767 spin_lock(&conn->session->lock); 772 spin_lock(&conn->session->lock);
768 rc = iscsi_data_rsp(conn, ctask); 773 task = iscsi_itt_to_ctask(conn, hdr->itt);
769 spin_unlock(&conn->session->lock); 774 if (!task)
770 if (rc) 775 rc = ISCSI_ERR_BAD_ITT;
771 return rc; 776 else
777 rc = iscsi_data_rsp(conn, task);
778 if (rc) {
779 spin_unlock(&conn->session->lock);
780 break;
781 }
782
772 if (tcp_conn->in.datalen) { 783 if (tcp_conn->in.datalen) {
773 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 784 struct iscsi_tcp_task *tcp_task = task->dd_data;
774 struct hash_desc *rx_hash = NULL; 785 struct hash_desc *rx_hash = NULL;
775 struct scsi_data_buffer *sdb = scsi_in(ctask->sc); 786 struct scsi_data_buffer *sdb = scsi_in(task->sc);
776 787
777 /* 788 /*
778 * Setup copy of Data-In into the Scsi_Cmnd 789 * Setup copy of Data-In into the Scsi_Cmnd
@@ -787,17 +798,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
787 798
788 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, " 799 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
789 "datalen=%d)\n", tcp_conn, 800 "datalen=%d)\n", tcp_conn,
790 tcp_ctask->data_offset, 801 tcp_task->data_offset,
791 tcp_conn->in.datalen); 802 tcp_conn->in.datalen);
792 return iscsi_segment_seek_sg(&tcp_conn->in.segment, 803 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
793 sdb->table.sgl, 804 sdb->table.sgl,
794 sdb->table.nents, 805 sdb->table.nents,
795 tcp_ctask->data_offset, 806 tcp_task->data_offset,
796 tcp_conn->in.datalen, 807 tcp_conn->in.datalen,
797 iscsi_tcp_process_data_in, 808 iscsi_tcp_process_data_in,
798 rx_hash); 809 rx_hash);
810 spin_unlock(&conn->session->lock);
811 return rc;
799 } 812 }
800 /* fall through */ 813 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
814 spin_unlock(&conn->session->lock);
815 break;
801 case ISCSI_OP_SCSI_CMD_RSP: 816 case ISCSI_OP_SCSI_CMD_RSP:
802 if (tcp_conn->in.datalen) { 817 if (tcp_conn->in.datalen) {
803 iscsi_tcp_data_recv_prep(tcp_conn); 818 iscsi_tcp_data_recv_prep(tcp_conn);
@@ -806,15 +821,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
806 rc = iscsi_complete_pdu(conn, hdr, NULL, 0); 821 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
807 break; 822 break;
808 case ISCSI_OP_R2T: 823 case ISCSI_OP_R2T:
809 ctask = session->cmds[itt]; 824 spin_lock(&conn->session->lock);
810 if (ahslen) 825 task = iscsi_itt_to_ctask(conn, hdr->itt);
826 if (!task)
827 rc = ISCSI_ERR_BAD_ITT;
828 else if (ahslen)
811 rc = ISCSI_ERR_AHSLEN; 829 rc = ISCSI_ERR_AHSLEN;
812 else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) { 830 else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
813 spin_lock(&session->lock); 831 rc = iscsi_r2t_rsp(conn, task);
814 rc = iscsi_r2t_rsp(conn, ctask); 832 else
815 spin_unlock(&session->lock);
816 } else
817 rc = ISCSI_ERR_PROTO; 833 rc = ISCSI_ERR_PROTO;
834 spin_unlock(&conn->session->lock);
818 break; 835 break;
819 case ISCSI_OP_LOGIN_RSP: 836 case ISCSI_OP_LOGIN_RSP:
820 case ISCSI_OP_TEXT_RSP: 837 case ISCSI_OP_TEXT_RSP:
@@ -1176,7 +1193,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1176{ 1193{
1177 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1194 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1178 1195
1179 debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn, 1196 debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
1180 conn->hdrdgst_en? ", digest enabled" : ""); 1197 conn->hdrdgst_en? ", digest enabled" : "");
1181 1198
1182 /* Clear the data segment - needs to be filled in by the 1199 /* Clear the data segment - needs to be filled in by the
@@ -1185,7 +1202,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1185 1202
1186 /* If header digest is enabled, compute the CRC and 1203 /* If header digest is enabled, compute the CRC and
1187 * place the digest into the same buffer. We make 1204 * place the digest into the same buffer. We make
1188 * sure that both iscsi_tcp_ctask and mtask have 1205 * sure that both iscsi_tcp_task and mtask have
1189 * sufficient room. 1206 * sufficient room.
1190 */ 1207 */
1191 if (conn->hdrdgst_en) { 1208 if (conn->hdrdgst_en) {
@@ -1217,7 +1234,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
1217 struct hash_desc *tx_hash = NULL; 1234 struct hash_desc *tx_hash = NULL;
1218 unsigned int hdr_spec_len; 1235 unsigned int hdr_spec_len;
1219 1236
1220 debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__, 1237 debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
1221 tcp_conn, offset, len, 1238 tcp_conn, offset, len,
1222 conn->datadgst_en? ", digest enabled" : ""); 1239 conn->datadgst_en? ", digest enabled" : "");
1223 1240
@@ -1242,7 +1259,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
1242 struct hash_desc *tx_hash = NULL; 1259 struct hash_desc *tx_hash = NULL;
1243 unsigned int hdr_spec_len; 1260 unsigned int hdr_spec_len;
1244 1261
1245 debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len, 1262 debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
1246 conn->datadgst_en? ", digest enabled" : ""); 1263 conn->datadgst_en? ", digest enabled" : "");
1247 1264
1248 /* Make sure the datalen matches what the caller 1265 /* Make sure the datalen matches what the caller
@@ -1260,7 +1277,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
1260/** 1277/**
1261 * iscsi_solicit_data_cont - initialize next Data-Out 1278 * iscsi_solicit_data_cont - initialize next Data-Out
1262 * @conn: iscsi connection 1279 * @conn: iscsi connection
1263 * @ctask: scsi command task 1280 * @task: scsi command task
1264 * @r2t: R2T info 1281 * @r2t: R2T info
1265 * @left: bytes left to transfer 1282 * @left: bytes left to transfer
1266 * 1283 *
@@ -1271,7 +1288,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
1271 * Called under connection lock. 1288 * Called under connection lock.
1272 **/ 1289 **/
1273static int 1290static int
1274iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 1291iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
1275 struct iscsi_r2t_info *r2t) 1292 struct iscsi_r2t_info *r2t)
1276{ 1293{
1277 struct iscsi_data *hdr; 1294 struct iscsi_data *hdr;
@@ -1288,8 +1305,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1288 hdr->datasn = cpu_to_be32(r2t->solicit_datasn); 1305 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
1289 r2t->solicit_datasn++; 1306 r2t->solicit_datasn++;
1290 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 1307 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1291 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); 1308 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
1292 hdr->itt = ctask->hdr->itt; 1309 hdr->itt = task->hdr->itt;
1293 hdr->exp_statsn = r2t->exp_statsn; 1310 hdr->exp_statsn = r2t->exp_statsn;
1294 new_offset = r2t->data_offset + r2t->sent; 1311 new_offset = r2t->data_offset + r2t->sent;
1295 hdr->offset = cpu_to_be32(new_offset); 1312 hdr->offset = cpu_to_be32(new_offset);
@@ -1307,89 +1324,76 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1307} 1324}
1308 1325
1309/** 1326/**
1310 * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands 1327 * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1311 * @conn: iscsi connection 1328 * @conn: iscsi connection
1312 * @ctask: scsi command task 1329 * @task: scsi command task
1313 * @sc: scsi command 1330 * @sc: scsi command
1314 **/ 1331 **/
1315static int 1332static int
1316iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask) 1333iscsi_tcp_task_init(struct iscsi_task *task)
1317{ 1334{
1318 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1335 struct iscsi_tcp_task *tcp_task = task->dd_data;
1319 struct iscsi_conn *conn = ctask->conn; 1336 struct iscsi_conn *conn = task->conn;
1320 struct scsi_cmnd *sc = ctask->sc; 1337 struct scsi_cmnd *sc = task->sc;
1321 int err; 1338 int err;
1322 1339
1323 BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); 1340 if (!sc) {
1324 tcp_ctask->sent = 0; 1341 /*
1325 tcp_ctask->exp_datasn = 0; 1342 * mgmt tasks do not have a scatterlist since they come
1343 * in from the iscsi interface.
1344 */
1345 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
1346 task->itt);
1347
1348 /* Prepare PDU, optionally w/ immediate data */
1349 iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
1350
1351 /* If we have immediate data, attach a payload */
1352 if (task->data_count)
1353 iscsi_tcp_send_linear_data_prepare(conn, task->data,
1354 task->data_count);
1355 return 0;
1356 }
1357
1358 BUG_ON(__kfifo_len(tcp_task->r2tqueue));
1359 tcp_task->sent = 0;
1360 tcp_task->exp_datasn = 0;
1326 1361
1327 /* Prepare PDU, optionally w/ immediate data */ 1362 /* Prepare PDU, optionally w/ immediate data */
1328 debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n", 1363 debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
1329 conn->id, ctask->itt, ctask->imm_count, 1364 conn->id, task->itt, task->imm_count,
1330 ctask->unsol_count); 1365 task->unsol_count);
1331 iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len); 1366 iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
1332 1367
1333 if (!ctask->imm_count) 1368 if (!task->imm_count)
1334 return 0; 1369 return 0;
1335 1370
1336 /* If we have immediate data, attach a payload */ 1371 /* If we have immediate data, attach a payload */
1337 err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl, 1372 err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
1338 scsi_out(sc)->table.nents, 1373 scsi_out(sc)->table.nents,
1339 0, ctask->imm_count); 1374 0, task->imm_count);
1340 if (err) 1375 if (err)
1341 return err; 1376 return err;
1342 tcp_ctask->sent += ctask->imm_count; 1377 tcp_task->sent += task->imm_count;
1343 ctask->imm_count = 0; 1378 task->imm_count = 0;
1344 return 0;
1345}
1346
1347/**
1348 * iscsi_tcp_mtask_xmit - xmit management(immediate) task
1349 * @conn: iscsi connection
1350 * @mtask: task management task
1351 *
1352 * Notes:
1353 * The function can return -EAGAIN in which case caller must
1354 * call it again later, or recover. '0' return code means successful
1355 * xmit.
1356 **/
1357static int
1358iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1359{
1360 int rc;
1361
1362 /* Flush any pending data first. */
1363 rc = iscsi_tcp_flush(conn);
1364 if (rc < 0)
1365 return rc;
1366
1367 if (mtask->hdr->itt == RESERVED_ITT) {
1368 struct iscsi_session *session = conn->session;
1369
1370 spin_lock_bh(&session->lock);
1371 iscsi_free_mgmt_task(conn, mtask);
1372 spin_unlock_bh(&session->lock);
1373 }
1374
1375 return 0; 1379 return 0;
1376} 1380}
1377 1381
1378/* 1382/*
1379 * iscsi_tcp_ctask_xmit - xmit normal PDU task 1383 * iscsi_tcp_task_xmit - xmit normal PDU task
1380 * @conn: iscsi connection 1384 * @task: iscsi command task
1381 * @ctask: iscsi command task
1382 * 1385 *
1383 * We're expected to return 0 when everything was transmitted successfully, 1386 * We're expected to return 0 when everything was transmitted successfully,
1384 * -EAGAIN if there's still data in the queue, or != 0 for any other kind 1387 * -EAGAIN if there's still data in the queue, or != 0 for any other kind
1385 * of error. 1388 * of error.
1386 */ 1389 */
1387static int 1390static int
1388iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1391iscsi_tcp_task_xmit(struct iscsi_task *task)
1389{ 1392{
1390 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1393 struct iscsi_conn *conn = task->conn;
1391 struct scsi_cmnd *sc = ctask->sc; 1394 struct iscsi_tcp_task *tcp_task = task->dd_data;
1392 struct scsi_data_buffer *sdb = scsi_out(sc); 1395 struct scsi_cmnd *sc = task->sc;
1396 struct scsi_data_buffer *sdb;
1393 int rc = 0; 1397 int rc = 0;
1394 1398
1395flush: 1399flush:
@@ -1398,31 +1402,39 @@ flush:
1398 if (rc < 0) 1402 if (rc < 0)
1399 return rc; 1403 return rc;
1400 1404
1405 /* mgmt command */
1406 if (!sc) {
1407 if (task->hdr->itt == RESERVED_ITT)
1408 iscsi_put_task(task);
1409 return 0;
1410 }
1411
1401 /* Are we done already? */ 1412 /* Are we done already? */
1402 if (sc->sc_data_direction != DMA_TO_DEVICE) 1413 if (sc->sc_data_direction != DMA_TO_DEVICE)
1403 return 0; 1414 return 0;
1404 1415
1405 if (ctask->unsol_count != 0) { 1416 sdb = scsi_out(sc);
1406 struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr; 1417 if (task->unsol_count != 0) {
1418 struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
1407 1419
1408 /* Prepare a header for the unsolicited PDU. 1420 /* Prepare a header for the unsolicited PDU.
1409 * The amount of data we want to send will be 1421 * The amount of data we want to send will be
1410 * in ctask->data_count. 1422 * in task->data_count.
1411 * FIXME: return the data count instead. 1423 * FIXME: return the data count instead.
1412 */ 1424 */
1413 iscsi_prep_unsolicit_data_pdu(ctask, hdr); 1425 iscsi_prep_unsolicit_data_pdu(task, hdr);
1414 1426
1415 debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n", 1427 debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
1416 ctask->itt, tcp_ctask->sent, ctask->data_count); 1428 task->itt, tcp_task->sent, task->data_count);
1417 1429
1418 iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr)); 1430 iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
1419 rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl, 1431 rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
1420 sdb->table.nents, tcp_ctask->sent, 1432 sdb->table.nents, tcp_task->sent,
1421 ctask->data_count); 1433 task->data_count);
1422 if (rc) 1434 if (rc)
1423 goto fail; 1435 goto fail;
1424 tcp_ctask->sent += ctask->data_count; 1436 tcp_task->sent += task->data_count;
1425 ctask->unsol_count -= ctask->data_count; 1437 task->unsol_count -= task->data_count;
1426 goto flush; 1438 goto flush;
1427 } else { 1439 } else {
1428 struct iscsi_session *session = conn->session; 1440 struct iscsi_session *session = conn->session;
@@ -1431,22 +1443,22 @@ flush:
1431 /* All unsolicited PDUs sent. Check for solicited PDUs. 1443 /* All unsolicited PDUs sent. Check for solicited PDUs.
1432 */ 1444 */
1433 spin_lock_bh(&session->lock); 1445 spin_lock_bh(&session->lock);
1434 r2t = tcp_ctask->r2t; 1446 r2t = tcp_task->r2t;
1435 if (r2t != NULL) { 1447 if (r2t != NULL) {
1436 /* Continue with this R2T? */ 1448 /* Continue with this R2T? */
1437 if (!iscsi_solicit_data_cont(conn, ctask, r2t)) { 1449 if (!iscsi_solicit_data_cont(conn, task, r2t)) {
1438 debug_scsi(" done with r2t %p\n", r2t); 1450 debug_scsi(" done with r2t %p\n", r2t);
1439 1451
1440 __kfifo_put(tcp_ctask->r2tpool.queue, 1452 __kfifo_put(tcp_task->r2tpool.queue,
1441 (void*)&r2t, sizeof(void*)); 1453 (void*)&r2t, sizeof(void*));
1442 tcp_ctask->r2t = r2t = NULL; 1454 tcp_task->r2t = r2t = NULL;
1443 } 1455 }
1444 } 1456 }
1445 1457
1446 if (r2t == NULL) { 1458 if (r2t == NULL) {
1447 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, 1459 __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
1448 sizeof(void*)); 1460 sizeof(void*));
1449 r2t = tcp_ctask->r2t; 1461 r2t = tcp_task->r2t;
1450 } 1462 }
1451 spin_unlock_bh(&session->lock); 1463 spin_unlock_bh(&session->lock);
1452 1464
@@ -1457,7 +1469,7 @@ flush:
1457 } 1469 }
1458 1470
1459 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n", 1471 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
1460 r2t, r2t->solicit_datasn - 1, ctask->itt, 1472 r2t, r2t->solicit_datasn - 1, task->itt,
1461 r2t->data_offset + r2t->sent, r2t->data_count); 1473 r2t->data_offset + r2t->sent, r2t->data_count);
1462 1474
1463 iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr, 1475 iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
@@ -1469,7 +1481,7 @@ flush:
1469 r2t->data_count); 1481 r2t->data_count);
1470 if (rc) 1482 if (rc)
1471 goto fail; 1483 goto fail;
1472 tcp_ctask->sent += r2t->data_count; 1484 tcp_task->sent += r2t->data_count;
1473 r2t->sent += r2t->data_count; 1485 r2t->sent += r2t->data_count;
1474 goto flush; 1486 goto flush;
1475 } 1487 }
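
The flush loop above drains a write in two phases: first any unsolicited Data-Out PDUs the target allows up front, then R2T-solicited ones, re-entering via goto flush until an error or completion. A standalone sketch of the unsolicited chunking arithmetic (field names borrowed from the driver; the real starting offset also skips past any immediate data via unsol_offset):

	#include <stdio.h>

	int main(void)
	{
		unsigned max_xmit_dlength = 8192;  /* per-PDU data segment limit */
		unsigned unsol_count = 20000;      /* unsolicited bytes still owed */
		unsigned sent = 0, datasn = 0;

		while (unsol_count) {
			/* clamp to one PDU, as iscsi_prep_unsolicit_data_pdu() does */
			unsigned data_count = unsol_count > max_xmit_dlength ?
					      max_xmit_dlength : unsol_count;

			printf("Data-Out datasn=%u offset=%u dlen=%u%s\n",
			       datasn++, sent, data_count,
			       data_count == unsol_count ? " (final, F bit)" : "");
			sent += data_count;
			unsol_count -= data_count;
		}
		return 0;
	}
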
@@ -1486,7 +1498,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1486 struct iscsi_cls_conn *cls_conn; 1498 struct iscsi_cls_conn *cls_conn;
1487 struct iscsi_tcp_conn *tcp_conn; 1499 struct iscsi_tcp_conn *tcp_conn;
1488 1500
1489 cls_conn = iscsi_conn_setup(cls_session, conn_idx); 1501 cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
1490 if (!cls_conn) 1502 if (!cls_conn)
1491 return NULL; 1503 return NULL;
1492 conn = cls_conn->dd_data; 1504 conn = cls_conn->dd_data;
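
Note how iscsi_conn_setup() now takes the LLD's private-data size: the class connection, the iscsi_conn, and the iscsi_tcp_conn are carved out of one allocation, which is why the kzalloc()/kfree() pair and the tcp_conn_alloc_fail label vanish below. A userspace model of the layout trick (names hypothetical):

	#include <stdlib.h>

	struct conn { int id; void *dd_data; };

	/* allocate conn plus dd_size bytes of driver-private data in one block */
	static struct conn *conn_setup(size_t dd_size)
	{
		struct conn *conn = calloc(1, sizeof(*conn) + dd_size);

		if (conn)
			conn->dd_data = conn + 1; /* private area follows the struct */
		return conn;
	}

	int main(void)
	{
		struct tcp_priv { int header_digest; } *priv;
		struct conn *conn = conn_setup(sizeof(*priv));

		if (!conn)
			return 1;
		priv = conn->dd_data;	/* no second allocation needed */
		priv->header_digest = 1;
		free(conn);		/* one free tears down both */
		return 0;
	}
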
@@ -1496,18 +1508,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1496 */ 1508 */
1497 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN; 1509 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
1498 1510
1499 tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL); 1511 tcp_conn = conn->dd_data;
1500 if (!tcp_conn)
1501 goto tcp_conn_alloc_fail;
1502
1503 conn->dd_data = tcp_conn;
1504 tcp_conn->iscsi_conn = conn; 1512 tcp_conn->iscsi_conn = conn;
1505 1513
1506 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1514 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1507 CRYPTO_ALG_ASYNC); 1515 CRYPTO_ALG_ASYNC);
1508 tcp_conn->tx_hash.flags = 0; 1516 tcp_conn->tx_hash.flags = 0;
1509 if (IS_ERR(tcp_conn->tx_hash.tfm)) 1517 if (IS_ERR(tcp_conn->tx_hash.tfm))
1510 goto free_tcp_conn; 1518 goto free_conn;
1511 1519
1512 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1520 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1513 CRYPTO_ALG_ASYNC); 1521 CRYPTO_ALG_ASYNC);
@@ -1519,14 +1527,12 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1519 1527
1520free_tx_tfm: 1528free_tx_tfm:
1521 crypto_free_hash(tcp_conn->tx_hash.tfm); 1529 crypto_free_hash(tcp_conn->tx_hash.tfm);
1522free_tcp_conn: 1530free_conn:
1523 iscsi_conn_printk(KERN_ERR, conn, 1531 iscsi_conn_printk(KERN_ERR, conn,
1524 "Could not create connection due to crc32c " 1532 "Could not create connection due to crc32c "
1525 "loading error. Make sure the crc32c " 1533 "loading error. Make sure the crc32c "
1526 "module is built as a module or into the " 1534 "module is built as a module or into the "
1527 "kernel\n"); 1535 "kernel\n");
1528 kfree(tcp_conn);
1529tcp_conn_alloc_fail:
1530 iscsi_conn_teardown(cls_conn); 1536 iscsi_conn_teardown(cls_conn);
1531 return NULL; 1537 return NULL;
1532} 1538}
@@ -1547,7 +1553,6 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
1547 1553
1548 spin_lock_bh(&session->lock); 1554 spin_lock_bh(&session->lock);
1549 tcp_conn->sock = NULL; 1555 tcp_conn->sock = NULL;
1550 conn->recv_lock = NULL;
1551 spin_unlock_bh(&session->lock); 1556 spin_unlock_bh(&session->lock);
1552 sockfd_put(sock); 1557 sockfd_put(sock);
1553} 1558}
@@ -1559,20 +1564,32 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1559 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1564 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1560 1565
1561 iscsi_tcp_release_conn(conn); 1566 iscsi_tcp_release_conn(conn);
1562 iscsi_conn_teardown(cls_conn);
1563 1567
1564 if (tcp_conn->tx_hash.tfm) 1568 if (tcp_conn->tx_hash.tfm)
1565 crypto_free_hash(tcp_conn->tx_hash.tfm); 1569 crypto_free_hash(tcp_conn->tx_hash.tfm);
1566 if (tcp_conn->rx_hash.tfm) 1570 if (tcp_conn->rx_hash.tfm)
1567 crypto_free_hash(tcp_conn->rx_hash.tfm); 1571 crypto_free_hash(tcp_conn->rx_hash.tfm);
1568 1572
1569 kfree(tcp_conn); 1573 iscsi_conn_teardown(cls_conn);
1570} 1574}
1571 1575
1572static void 1576static void
1573iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 1577iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1574{ 1578{
1575 struct iscsi_conn *conn = cls_conn->dd_data; 1579 struct iscsi_conn *conn = cls_conn->dd_data;
1580 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1581
1582 /* userspace may have goofed up and not bound us */
1583 if (!tcp_conn->sock)
1584 return;
1585 /*
1586 * Make sure our recv side is stopped.
1587 * Older tools called conn stop before ep_disconnect
1588 * so IO could still be coming in.
1589 */
1590 write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
1591 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1592 write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
1576 1593
1577 iscsi_conn_stop(cls_conn, flag); 1594 iscsi_conn_stop(cls_conn, flag);
1578 iscsi_tcp_release_conn(conn); 1595 iscsi_tcp_release_conn(conn);
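
The new stop path parks the receive side before tearing anything down: taking sk_callback_lock for write excludes a concurrently running data_ready callback, so once ISCSI_SUSPEND_BIT is visible no further rx work can start. A pthread model of that handshake (a sketch of the idea, not the kernel locking API):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;
	static int suspend_rx;

	/* socket callback path: runs with the callback lock held for read */
	static void data_ready(void)
	{
		pthread_rwlock_rdlock(&cb_lock);
		if (!suspend_rx)
			printf("processing received PDUs\n");
		pthread_rwlock_unlock(&cb_lock);
	}

	/* conn stop path: the write lock waits out in-flight callbacks */
	static void conn_stop(void)
	{
		pthread_rwlock_wrlock(&cb_lock);
		suspend_rx = 1;
		pthread_rwlock_unlock(&cb_lock);
		/* from here on no new rx work is started */
	}

	int main(void)
	{
		data_ready();
		conn_stop();
		data_ready();	/* now a no-op */
		return 0;
	}
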
@@ -1623,6 +1640,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1623 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, 1640 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
1624 int is_leading) 1641 int is_leading)
1625{ 1642{
1643 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1644 struct iscsi_host *ihost = shost_priv(shost);
1626 struct iscsi_conn *conn = cls_conn->dd_data; 1645 struct iscsi_conn *conn = cls_conn->dd_data;
1627 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1646 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1628 struct sock *sk; 1647 struct sock *sk;
@@ -1646,8 +1665,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1646 if (err) 1665 if (err)
1647 goto free_socket; 1666 goto free_socket;
1648 1667
1649 err = iscsi_tcp_get_addr(conn, sock, conn->local_address, 1668 err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
1650 &conn->local_port, kernel_getsockname); 1669 &ihost->local_port, kernel_getsockname);
1651 if (err) 1670 if (err)
1652 goto free_socket; 1671 goto free_socket;
1653 1672
@@ -1664,13 +1683,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1664 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ 1683 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
1665 sk->sk_allocation = GFP_ATOMIC; 1684 sk->sk_allocation = GFP_ATOMIC;
1666 1685
1667 /* FIXME: disable Nagle's algorithm */
1668
1669 /*
1670 * Intercept TCP callbacks for sendfile like receive
1671 * processing.
1672 */
1673 conn->recv_lock = &sk->sk_callback_lock;
1674 iscsi_conn_set_callbacks(conn); 1686 iscsi_conn_set_callbacks(conn);
1675 tcp_conn->sendpage = tcp_conn->sock->ops->sendpage; 1687 tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
1676 /* 1688 /*
@@ -1684,21 +1696,6 @@ free_socket:
1684 return err; 1696 return err;
1685} 1697}
1686 1698
1687/* called with host lock */
1688static void
1689iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1690{
1691 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
1692
1693 /* Prepare PDU, optionally w/ immediate data */
1694 iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
1695
1696 /* If we have immediate data, attach a payload */
1697 if (mtask->data_count)
1698 iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
1699 mtask->data_count);
1700}
1701
1702static int 1699static int
1703iscsi_r2tpool_alloc(struct iscsi_session *session) 1700iscsi_r2tpool_alloc(struct iscsi_session *session)
1704{ 1701{
@@ -1709,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
1709 * initialize per-task: R2T pool and xmit queue 1706 * initialize per-task: R2T pool and xmit queue
1710 */ 1707 */
1711 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { 1708 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1712 struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; 1709 struct iscsi_task *task = session->cmds[cmd_i];
1713 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1710 struct iscsi_tcp_task *tcp_task = task->dd_data;
1714 1711
1715 /* 1712 /*
1716 * pre-allocated x4 as much r2ts to handle race when 1713 * pre-allocated x4 as much r2ts to handle race when
@@ -1719,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
1719 */ 1716 */
1720 1717
1721 /* R2T pool */ 1718 /* R2T pool */
1722 if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL, 1719 if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
1723 sizeof(struct iscsi_r2t_info))) { 1720 sizeof(struct iscsi_r2t_info))) {
1724 goto r2t_alloc_fail; 1721 goto r2t_alloc_fail;
1725 } 1722 }
1726 1723
1727 /* R2T xmit queue */ 1724 /* R2T xmit queue */
1728 tcp_ctask->r2tqueue = kfifo_alloc( 1725 tcp_task->r2tqueue = kfifo_alloc(
1729 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL); 1726 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
1730 if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) { 1727 if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
1731 iscsi_pool_free(&tcp_ctask->r2tpool); 1728 iscsi_pool_free(&tcp_task->r2tpool);
1732 goto r2t_alloc_fail; 1729 goto r2t_alloc_fail;
1733 } 1730 }
1734 } 1731 }
@@ -1737,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
1737 1734
1738r2t_alloc_fail: 1735r2t_alloc_fail:
1739 for (i = 0; i < cmd_i; i++) { 1736 for (i = 0; i < cmd_i; i++) {
1740 struct iscsi_cmd_task *ctask = session->cmds[i]; 1737 struct iscsi_task *task = session->cmds[i];
1741 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1738 struct iscsi_tcp_task *tcp_task = task->dd_data;
1742 1739
1743 kfifo_free(tcp_ctask->r2tqueue); 1740 kfifo_free(tcp_task->r2tqueue);
1744 iscsi_pool_free(&tcp_ctask->r2tpool); 1741 iscsi_pool_free(&tcp_task->r2tpool);
1745 } 1742 }
1746 return -ENOMEM; 1743 return -ENOMEM;
1747} 1744}
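
The failure path above frees only the pools that were successfully initialized (indices 0..cmd_i-1), a standard partial-unwind. The same pattern in a compact standalone form (pool internals faked):

	#include <stdlib.h>

	struct pool { void *mem; };

	static int pool_init(struct pool *p, size_t nr_items)
	{
		p->mem = calloc(nr_items, 64);	/* 64: fake per-item size */
		return p->mem ? 0 : -1;
	}

	static void pool_free(struct pool *p)
	{
		free(p->mem);
	}

	/* mirrors iscsi_r2tpool_alloc(): unwind exactly what succeeded */
	static int pools_alloc(struct pool *pools, int cmds_max, int max_r2t)
	{
		int cmd_i, i;

		for (cmd_i = 0; cmd_i < cmds_max; cmd_i++) {
			/* 4x head-room, as the driver comment explains */
			if (pool_init(&pools[cmd_i], max_r2t * 4))
				goto fail;
		}
		return 0;
	fail:
		for (i = 0; i < cmd_i; i++)
			pool_free(&pools[i]);
		return -1;
	}

	int main(void)
	{
		struct pool pools[8];

		return pools_alloc(pools, 8, 1) ? 1 : 0;
	}
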
@@ -1752,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
1752 int i; 1749 int i;
1753 1750
1754 for (i = 0; i < session->cmds_max; i++) { 1751 for (i = 0; i < session->cmds_max; i++) {
1755 struct iscsi_cmd_task *ctask = session->cmds[i]; 1752 struct iscsi_task *task = session->cmds[i];
1756 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1753 struct iscsi_tcp_task *tcp_task = task->dd_data;
1757 1754
1758 kfifo_free(tcp_ctask->r2tqueue); 1755 kfifo_free(tcp_task->r2tqueue);
1759 iscsi_pool_free(&tcp_ctask->r2tpool); 1756 iscsi_pool_free(&tcp_task->r2tpool);
1760 } 1757 }
1761} 1758}
1762 1759
@@ -1821,29 +1818,6 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
1821 return len; 1818 return len;
1822} 1819}
1823 1820
1824static int
1825iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
1826 char *buf)
1827{
1828 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1829 int len;
1830
1831 switch (param) {
1832 case ISCSI_HOST_PARAM_IPADDRESS:
1833 spin_lock_bh(&session->lock);
1834 if (!session->leadconn)
1835 len = -ENODEV;
1836 else
1837 len = sprintf(buf, "%s\n",
1838 session->leadconn->local_address);
1839 spin_unlock_bh(&session->lock);
1840 break;
1841 default:
1842 return iscsi_host_get_param(shost, param, buf);
1843 }
1844 return len;
1845}
1846
1847static void 1821static void
1848iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) 1822iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
1849{ 1823{
@@ -1869,54 +1843,70 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
1869} 1843}
1870 1844
1871static struct iscsi_cls_session * 1845static struct iscsi_cls_session *
1872iscsi_tcp_session_create(struct iscsi_transport *iscsit, 1846iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
1873 struct scsi_transport_template *scsit, 1847 uint16_t qdepth, uint32_t initial_cmdsn,
1874 uint16_t cmds_max, uint16_t qdepth, 1848 uint32_t *hostno)
1875 uint32_t initial_cmdsn, uint32_t *hostno)
1876{ 1849{
1877 struct iscsi_cls_session *cls_session; 1850 struct iscsi_cls_session *cls_session;
1878 struct iscsi_session *session; 1851 struct iscsi_session *session;
1879 uint32_t hn; 1852 struct Scsi_Host *shost;
1880 int cmd_i; 1853 int cmd_i;
1881 1854
1882 cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth, 1855 if (ep) {
1883 sizeof(struct iscsi_tcp_cmd_task), 1856 printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
1884 sizeof(struct iscsi_tcp_mgmt_task),
1885 initial_cmdsn, &hn);
1886 if (!cls_session)
1887 return NULL; 1857 return NULL;
1888 *hostno = hn;
1889
1890 session = class_to_transport_session(cls_session);
1891 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1892 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
1893 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1894
1895 ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
1896 ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
1897 } 1858 }
1898 1859
1899 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) { 1860 shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
1900 struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i]; 1861 if (!shost)
1901 struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; 1862 return NULL;
1863 shost->transportt = iscsi_tcp_scsi_transport;
1864 shost->max_lun = iscsi_max_lun;
1865 shost->max_id = 0;
1866 shost->max_channel = 0;
1867 shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
1868
1869 if (iscsi_host_add(shost, NULL))
1870 goto free_host;
1871 *hostno = shost->host_no;
1872
1873 cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
1874 sizeof(struct iscsi_tcp_task),
1875 initial_cmdsn, 0);
1876 if (!cls_session)
1877 goto remove_host;
1878 session = cls_session->dd_data;
1879
1880 shost->can_queue = session->scsi_cmds_max;
1881 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1882 struct iscsi_task *task = session->cmds[cmd_i];
1883 struct iscsi_tcp_task *tcp_task = task->dd_data;
1902 1884
1903 mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr; 1885 task->hdr = &tcp_task->hdr.cmd_hdr;
1886 task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
1904 } 1887 }
1905 1888
1906 if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session))) 1889 if (iscsi_r2tpool_alloc(session))
1907 goto r2tpool_alloc_fail; 1890 goto remove_session;
1908
1909 return cls_session; 1891 return cls_session;
1910 1892
1911r2tpool_alloc_fail: 1893remove_session:
1912 iscsi_session_teardown(cls_session); 1894 iscsi_session_teardown(cls_session);
1895remove_host:
1896 iscsi_host_remove(shost);
1897free_host:
1898 iscsi_host_free(shost);
1913 return NULL; 1899 return NULL;
1914} 1900}
1915 1901
1916static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) 1902static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
1917{ 1903{
1918 iscsi_r2tpool_free(class_to_transport_session(cls_session)); 1904 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1919 iscsi_session_teardown(cls_session); 1905
1906 iscsi_r2tpool_free(cls_session->dd_data);
1907
1908 iscsi_host_remove(shost);
1909 iscsi_host_free(shost);
1920} 1910}
1921 1911
1922static int iscsi_tcp_slave_configure(struct scsi_device *sdev) 1912static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
@@ -1971,14 +1961,11 @@ static struct iscsi_transport iscsi_tcp_transport = {
1971 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | 1961 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
1972 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | 1962 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
1973 ISCSI_LU_RESET_TMO | 1963 ISCSI_LU_RESET_TMO |
1974 ISCSI_PING_TMO | ISCSI_RECV_TMO, 1964 ISCSI_PING_TMO | ISCSI_RECV_TMO |
1965 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
1975 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | 1966 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
1976 ISCSI_HOST_INITIATOR_NAME | 1967 ISCSI_HOST_INITIATOR_NAME |
1977 ISCSI_HOST_NETDEV_NAME, 1968 ISCSI_HOST_NETDEV_NAME,
1978 .host_template = &iscsi_sht,
1979 .conndata_size = sizeof(struct iscsi_conn),
1980 .max_conn = 1,
1981 .max_cmd_len = 16,
1982 /* session management */ 1969 /* session management */
1983 .create_session = iscsi_tcp_session_create, 1970 .create_session = iscsi_tcp_session_create,
1984 .destroy_session = iscsi_tcp_session_destroy, 1971 .destroy_session = iscsi_tcp_session_destroy,
@@ -1992,16 +1979,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
1992 .start_conn = iscsi_conn_start, 1979 .start_conn = iscsi_conn_start,
1993 .stop_conn = iscsi_tcp_conn_stop, 1980 .stop_conn = iscsi_tcp_conn_stop,
1994 /* iscsi host params */ 1981 /* iscsi host params */
1995 .get_host_param = iscsi_tcp_host_get_param, 1982 .get_host_param = iscsi_host_get_param,
1996 .set_host_param = iscsi_host_set_param, 1983 .set_host_param = iscsi_host_set_param,
1997 /* IO */ 1984 /* IO */
1998 .send_pdu = iscsi_conn_send_pdu, 1985 .send_pdu = iscsi_conn_send_pdu,
1999 .get_stats = iscsi_conn_get_stats, 1986 .get_stats = iscsi_conn_get_stats,
2000 .init_cmd_task = iscsi_tcp_ctask_init, 1987 .init_task = iscsi_tcp_task_init,
2001 .init_mgmt_task = iscsi_tcp_mtask_init, 1988 .xmit_task = iscsi_tcp_task_xmit,
2002 .xmit_cmd_task = iscsi_tcp_ctask_xmit, 1989 .cleanup_task = iscsi_tcp_cleanup_task,
2003 .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
2004 .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
2005 /* recovery */ 1990 /* recovery */
2006 .session_recovery_timedout = iscsi_session_recovery_timedout, 1991 .session_recovery_timedout = iscsi_session_recovery_timedout,
2007}; 1992};
@@ -2014,9 +1999,10 @@ iscsi_tcp_init(void)
2014 iscsi_max_lun); 1999 iscsi_max_lun);
2015 return -EINVAL; 2000 return -EINVAL;
2016 } 2001 }
2017 iscsi_tcp_transport.max_lun = iscsi_max_lun;
2018 2002
2019 if (!iscsi_register_transport(&iscsi_tcp_transport)) 2003 iscsi_tcp_scsi_transport = iscsi_register_transport(
2004 &iscsi_tcp_transport);
2005 if (!iscsi_tcp_scsi_transport)
2020 return -ENODEV; 2006 return -ENODEV;
2021 2007
2022 return 0; 2008 return 0;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index ed0b991d1e72..498d8ca39848 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -103,11 +103,6 @@ struct iscsi_data_task {
103 char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */ 103 char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
104}; 104};
105 105
106struct iscsi_tcp_mgmt_task {
107 struct iscsi_hdr hdr;
108 char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
109};
110
111struct iscsi_r2t_info { 106struct iscsi_r2t_info {
112 __be32 ttt; /* copied from R2T */ 107 __be32 ttt; /* copied from R2T */
113 __be32 exp_statsn; /* copied from R2T */ 108 __be32 exp_statsn; /* copied from R2T */
@@ -119,7 +114,7 @@ struct iscsi_r2t_info {
119 struct iscsi_data_task dtask; /* Data-Out header buf */ 114 struct iscsi_data_task dtask; /* Data-Out header buf */
120}; 115};
121 116
122struct iscsi_tcp_cmd_task { 117struct iscsi_tcp_task {
123 struct iscsi_hdr_buff { 118 struct iscsi_hdr_buff {
124 struct iscsi_cmd cmd_hdr; 119 struct iscsi_cmd cmd_hdr;
125 char hdrextbuf[ISCSI_MAX_AHS_SIZE + 120 char hdrextbuf[ISCSI_MAX_AHS_SIZE +
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b43bf1d60dac..299e075a7b34 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -38,14 +38,6 @@
38#include <scsi/scsi_transport_iscsi.h> 38#include <scsi/scsi_transport_iscsi.h>
39#include <scsi/libiscsi.h> 39#include <scsi/libiscsi.h>
40 40
41struct iscsi_session *
42class_to_transport_session(struct iscsi_cls_session *cls_session)
43{
44 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
45 return iscsi_hostdata(shost->hostdata);
46}
47EXPORT_SYMBOL_GPL(class_to_transport_session);
48
49/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ 41/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
50#define SNA32_CHECK 2147483648UL 42#define SNA32_CHECK 2147483648UL
51 43
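
SNA32_CHECK is 2^31, the half-window from RFC 1982 serial number arithmetic that lets 32-bit CmdSN/StatSN counters wrap safely. A compact equivalent of the comparison libiscsi builds on it (one expression rather than the kernel's two-clause form):

	#include <assert.h>
	#include <stdint.h>

	#define SNA32_CHECK 2147483648UL	/* 2^31 */

	/* true iff n2 is "ahead" of n1 by less than 2^31, modulo 2^32 */
	static int sna32_lt(uint32_t n1, uint32_t n2)
	{
		return n1 != n2 && (uint32_t)(n2 - n1) < SNA32_CHECK;
	}

	int main(void)
	{
		assert(sna32_lt(5, 10));		/* plain ordering */
		assert(sna32_lt(0xfffffff0u, 5));	/* survives wraparound */
		assert(!sna32_lt(5, 0xfffffff0u));
		return 0;
	}
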
@@ -87,68 +79,70 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
87 * xmit thread 79 * xmit thread
88 */ 80 */
89 if (!list_empty(&session->leadconn->xmitqueue) || 81 if (!list_empty(&session->leadconn->xmitqueue) ||
90 !list_empty(&session->leadconn->mgmtqueue)) 82 !list_empty(&session->leadconn->mgmtqueue)) {
91 scsi_queue_work(session->host, 83 if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
92 &session->leadconn->xmitwork); 84 scsi_queue_work(session->host,
85 &session->leadconn->xmitwork);
86 }
93 } 87 }
94} 88}
95EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 89EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
96 90
97void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask, 91void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
98 struct iscsi_data *hdr) 92 struct iscsi_data *hdr)
99{ 93{
100 struct iscsi_conn *conn = ctask->conn; 94 struct iscsi_conn *conn = task->conn;
101 95
102 memset(hdr, 0, sizeof(struct iscsi_data)); 96 memset(hdr, 0, sizeof(struct iscsi_data));
103 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); 97 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
104 hdr->datasn = cpu_to_be32(ctask->unsol_datasn); 98 hdr->datasn = cpu_to_be32(task->unsol_datasn);
105 ctask->unsol_datasn++; 99 task->unsol_datasn++;
106 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 100 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
107 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); 101 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
108 102
109 hdr->itt = ctask->hdr->itt; 103 hdr->itt = task->hdr->itt;
110 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 104 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
111 hdr->offset = cpu_to_be32(ctask->unsol_offset); 105 hdr->offset = cpu_to_be32(task->unsol_offset);
112 106
113 if (ctask->unsol_count > conn->max_xmit_dlength) { 107 if (task->unsol_count > conn->max_xmit_dlength) {
114 hton24(hdr->dlength, conn->max_xmit_dlength); 108 hton24(hdr->dlength, conn->max_xmit_dlength);
115 ctask->data_count = conn->max_xmit_dlength; 109 task->data_count = conn->max_xmit_dlength;
116 ctask->unsol_offset += ctask->data_count; 110 task->unsol_offset += task->data_count;
117 hdr->flags = 0; 111 hdr->flags = 0;
118 } else { 112 } else {
119 hton24(hdr->dlength, ctask->unsol_count); 113 hton24(hdr->dlength, task->unsol_count);
120 ctask->data_count = ctask->unsol_count; 114 task->data_count = task->unsol_count;
121 hdr->flags = ISCSI_FLAG_CMD_FINAL; 115 hdr->flags = ISCSI_FLAG_CMD_FINAL;
122 } 116 }
123} 117}
124EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu); 118EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
125 119
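
hton24() stores the 24-bit DataSegmentLength big-endian into hdr->dlength, and zero_data() clears it; the kernel macros live in iscsi_proto.h. Their effect, spelled out:

	#include <stdint.h>
	#include <stdio.h>

	/* store a 24-bit length big-endian, as hton24(hdr->dlength, len) does */
	static void hton24(uint8_t p[3], uint32_t len)
	{
		p[0] = (len >> 16) & 0xff;
		p[1] = (len >> 8) & 0xff;
		p[2] = len & 0xff;
	}

	int main(void)
	{
		uint8_t dlength[3];

		hton24(dlength, 8192);
		printf("%02x %02x %02x\n", dlength[0], dlength[1], dlength[2]);
		/* prints "00 20 00": 8192 == 0x002000 */
		return 0;
	}
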
126static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len) 120static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
127{ 121{
128 unsigned exp_len = ctask->hdr_len + len; 122 unsigned exp_len = task->hdr_len + len;
129 123
130 if (exp_len > ctask->hdr_max) { 124 if (exp_len > task->hdr_max) {
131 WARN_ON(1); 125 WARN_ON(1);
132 return -EINVAL; 126 return -EINVAL;
133 } 127 }
134 128
135 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */ 129 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
136 ctask->hdr_len = exp_len; 130 task->hdr_len = exp_len;
137 return 0; 131 return 0;
138} 132}
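
iscsi_add_hdr() insists every AHS is already padded to ISCSI_PAD_LEN (4 bytes); iscsi_padding() computes the shortfall. The arithmetic, runnable:

	#include <assert.h>

	#define ISCSI_PAD_LEN 4

	/* bytes needed to reach a 4-byte boundary (cf. iscsi_padding()) */
	static unsigned iscsi_padding(unsigned len)
	{
		len &= (ISCSI_PAD_LEN - 1);
		return len ? ISCSI_PAD_LEN - len : 0;
	}

	int main(void)
	{
		/* a 10-byte extended-CDB remainder needs 2 pad bytes */
		assert(iscsi_padding(10) == 2);
		/* aligned lengths need none, so iscsi_add_hdr()'s
		   WARN_ON(len & (ISCSI_PAD_LEN - 1)) never fires for
		   properly padded AHSs */
		assert(iscsi_padding(12) == 0);
		return 0;
	}
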
139 133
140/* 134/*
141 * make an extended cdb AHS 135 * make an extended cdb AHS
142 */ 136 */
143static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask) 137static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
144{ 138{
145 struct scsi_cmnd *cmd = ctask->sc; 139 struct scsi_cmnd *cmd = task->sc;
146 unsigned rlen, pad_len; 140 unsigned rlen, pad_len;
147 unsigned short ahslength; 141 unsigned short ahslength;
148 struct iscsi_ecdb_ahdr *ecdb_ahdr; 142 struct iscsi_ecdb_ahdr *ecdb_ahdr;
149 int rc; 143 int rc;
150 144
151 ecdb_ahdr = iscsi_next_hdr(ctask); 145 ecdb_ahdr = iscsi_next_hdr(task);
152 rlen = cmd->cmd_len - ISCSI_CDB_SIZE; 146 rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
153 147
154 BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb)); 148 BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
@@ -156,7 +150,7 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
156 150
157 pad_len = iscsi_padding(rlen); 151 pad_len = iscsi_padding(rlen);
158 152
159 rc = iscsi_add_hdr(ctask, sizeof(ecdb_ahdr->ahslength) + 153 rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
160 sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len); 154 sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
161 if (rc) 155 if (rc)
162 return rc; 156 return rc;
@@ -171,19 +165,19 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
171 165
172 debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d " 166 debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
173 "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n", 167 "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
174 cmd->cmd_len, rlen, pad_len, ahslength, ctask->hdr_len); 168 cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
175 169
176 return 0; 170 return 0;
177} 171}
178 172
179static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask) 173static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
180{ 174{
181 struct scsi_cmnd *sc = ctask->sc; 175 struct scsi_cmnd *sc = task->sc;
182 struct iscsi_rlength_ahdr *rlen_ahdr; 176 struct iscsi_rlength_ahdr *rlen_ahdr;
183 int rc; 177 int rc;
184 178
185 rlen_ahdr = iscsi_next_hdr(ctask); 179 rlen_ahdr = iscsi_next_hdr(task);
186 rc = iscsi_add_hdr(ctask, sizeof(*rlen_ahdr)); 180 rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
187 if (rc) 181 if (rc)
188 return rc; 182 return rc;
189 183
@@ -203,28 +197,28 @@ static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
203 197
204/** 198/**
205 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu 199 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
206 * @ctask: iscsi cmd task 200 * @task: iscsi task
207 * 201 *
208 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set 202 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
209 * fields like dlength or final based on how much data it sends 203 * fields like dlength or final based on how much data it sends
210 */ 204 */
211static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) 205static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
212{ 206{
213 struct iscsi_conn *conn = ctask->conn; 207 struct iscsi_conn *conn = task->conn;
214 struct iscsi_session *session = conn->session; 208 struct iscsi_session *session = conn->session;
215 struct iscsi_cmd *hdr = ctask->hdr; 209 struct iscsi_cmd *hdr = task->hdr;
216 struct scsi_cmnd *sc = ctask->sc; 210 struct scsi_cmnd *sc = task->sc;
217 unsigned hdrlength, cmd_len; 211 unsigned hdrlength, cmd_len;
218 int rc; 212 int rc;
219 213
220 ctask->hdr_len = 0; 214 task->hdr_len = 0;
221 rc = iscsi_add_hdr(ctask, sizeof(*hdr)); 215 rc = iscsi_add_hdr(task, sizeof(*hdr));
222 if (rc) 216 if (rc)
223 return rc; 217 return rc;
224 hdr->opcode = ISCSI_OP_SCSI_CMD; 218 hdr->opcode = ISCSI_OP_SCSI_CMD;
225 hdr->flags = ISCSI_ATTR_SIMPLE; 219 hdr->flags = ISCSI_ATTR_SIMPLE;
226 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 220 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
227 hdr->itt = build_itt(ctask->itt, session->age); 221 hdr->itt = build_itt(task->itt, session->age);
228 hdr->cmdsn = cpu_to_be32(session->cmdsn); 222 hdr->cmdsn = cpu_to_be32(session->cmdsn);
229 session->cmdsn++; 223 session->cmdsn++;
230 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 224 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
@@ -232,17 +226,17 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
232 if (cmd_len < ISCSI_CDB_SIZE) 226 if (cmd_len < ISCSI_CDB_SIZE)
233 memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len); 227 memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
234 else if (cmd_len > ISCSI_CDB_SIZE) { 228 else if (cmd_len > ISCSI_CDB_SIZE) {
235 rc = iscsi_prep_ecdb_ahs(ctask); 229 rc = iscsi_prep_ecdb_ahs(task);
236 if (rc) 230 if (rc)
237 return rc; 231 return rc;
238 cmd_len = ISCSI_CDB_SIZE; 232 cmd_len = ISCSI_CDB_SIZE;
239 } 233 }
240 memcpy(hdr->cdb, sc->cmnd, cmd_len); 234 memcpy(hdr->cdb, sc->cmnd, cmd_len);
241 235
242 ctask->imm_count = 0; 236 task->imm_count = 0;
243 if (scsi_bidi_cmnd(sc)) { 237 if (scsi_bidi_cmnd(sc)) {
244 hdr->flags |= ISCSI_FLAG_CMD_READ; 238 hdr->flags |= ISCSI_FLAG_CMD_READ;
245 rc = iscsi_prep_bidi_ahs(ctask); 239 rc = iscsi_prep_bidi_ahs(task);
246 if (rc) 240 if (rc)
247 return rc; 241 return rc;
248 } 242 }
@@ -264,28 +258,28 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
264 * 258 *
265 * pad_count bytes to be sent as zero-padding 259 * pad_count bytes to be sent as zero-padding
266 */ 260 */
267 ctask->unsol_count = 0; 261 task->unsol_count = 0;
268 ctask->unsol_offset = 0; 262 task->unsol_offset = 0;
269 ctask->unsol_datasn = 0; 263 task->unsol_datasn = 0;
270 264
271 if (session->imm_data_en) { 265 if (session->imm_data_en) {
272 if (out_len >= session->first_burst) 266 if (out_len >= session->first_burst)
273 ctask->imm_count = min(session->first_burst, 267 task->imm_count = min(session->first_burst,
274 conn->max_xmit_dlength); 268 conn->max_xmit_dlength);
275 else 269 else
276 ctask->imm_count = min(out_len, 270 task->imm_count = min(out_len,
277 conn->max_xmit_dlength); 271 conn->max_xmit_dlength);
278 hton24(hdr->dlength, ctask->imm_count); 272 hton24(hdr->dlength, task->imm_count);
279 } else 273 } else
280 zero_data(hdr->dlength); 274 zero_data(hdr->dlength);
281 275
282 if (!session->initial_r2t_en) { 276 if (!session->initial_r2t_en) {
283 ctask->unsol_count = min(session->first_burst, out_len) 277 task->unsol_count = min(session->first_burst, out_len)
284 - ctask->imm_count; 278 - task->imm_count;
285 ctask->unsol_offset = ctask->imm_count; 279 task->unsol_offset = task->imm_count;
286 } 280 }
287 281
288 if (!ctask->unsol_count) 282 if (!task->unsol_count)
 289 /* No unsolicited Data-Outs */ 283 /* No unsolicited Data-Outs */
290 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 284 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
291 } else { 285 } else {
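
For writes, the hunk above splits the transfer three ways: immediate data carried in the command PDU itself, unsolicited Data-Outs up to FirstBurstLength, and the remainder solicited by R2Ts. A worked example using the same min() logic (parameter values illustrative):

	#include <stdio.h>

	static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

	int main(void)
	{
		unsigned first_burst = 16384;      /* FirstBurstLength */
		unsigned max_xmit_dlength = 8192;  /* per-PDU limit */
		unsigned out_len = 20000;          /* write size */
		unsigned imm_count = 0, unsol_count = 0;
		int imm_data_en = 1, initial_r2t_en = 0;

		/* same splits as iscsi_prep_scsi_cmd_pdu() */
		if (imm_data_en)
			imm_count = min_u(min_u(first_burst, out_len),
					  max_xmit_dlength);
		if (!initial_r2t_en)
			unsol_count = min_u(first_burst, out_len) - imm_count;

		printf("immediate %u, unsolicited %u, solicited via R2T %u\n",
		       imm_count, unsol_count,
		       out_len - imm_count - unsol_count);
		/* -> immediate 8192, unsolicited 8192, solicited 3616 */
		return 0;
	}
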
@@ -298,7 +292,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
298 } 292 }
299 293
300 /* calculate size of additional header segments (AHSs) */ 294 /* calculate size of additional header segments (AHSs) */
301 hdrlength = ctask->hdr_len - sizeof(*hdr); 295 hdrlength = task->hdr_len - sizeof(*hdr);
302 296
303 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1)); 297 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
304 hdrlength /= ISCSI_PAD_LEN; 298 hdrlength /= ISCSI_PAD_LEN;
@@ -306,76 +300,115 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
306 WARN_ON(hdrlength >= 256); 300 WARN_ON(hdrlength >= 256);
307 hdr->hlength = hdrlength & 0xFF; 301 hdr->hlength = hdrlength & 0xFF;
308 302
309 if (conn->session->tt->init_cmd_task(conn->ctask)) 303 if (conn->session->tt->init_task &&
310 return EIO; 304 conn->session->tt->init_task(task))
305 return -EIO;
306
307 task->state = ISCSI_TASK_RUNNING;
308 list_move_tail(&task->running, &conn->run_list);
311 309
312 conn->scsicmd_pdus_cnt++; 310 conn->scsicmd_pdus_cnt++;
313 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x " 311 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
314 "len %d bidi_len %d cmdsn %d win %d]\n", 312 "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
315 scsi_bidi_cmnd(sc) ? "bidirectional" : 313 "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
316 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", 314 "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
317 conn->id, sc, sc->cmnd[0], ctask->itt, 315 scsi_bufflen(sc),
318 scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, 316 scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
319 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); 317 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
320 return 0; 318 return 0;
321} 319}
322 320
323/** 321/**
324 * iscsi_complete_command - return command back to scsi-ml 322 * iscsi_complete_command - finish a task
325 * @ctask: iscsi cmd task 323 * @task: iscsi cmd task
326 * 324 *
327 * Must be called with session lock. 325 * Must be called with session lock.
328 * This function returns the scsi command to scsi-ml and returns 326 * This function returns the scsi command to scsi-ml or cleans
329 * the cmd task to the pool of available cmd tasks. 327 * up mgmt tasks then returns the task to the pool.
330 */ 328 */
331static void iscsi_complete_command(struct iscsi_cmd_task *ctask) 329static void iscsi_complete_command(struct iscsi_task *task)
332{ 330{
333 struct iscsi_conn *conn = ctask->conn; 331 struct iscsi_conn *conn = task->conn;
334 struct iscsi_session *session = conn->session; 332 struct iscsi_session *session = conn->session;
335 struct scsi_cmnd *sc = ctask->sc; 333 struct scsi_cmnd *sc = task->sc;
336 334
337 ctask->state = ISCSI_TASK_COMPLETED; 335 list_del_init(&task->running);
338 ctask->sc = NULL; 336 task->state = ISCSI_TASK_COMPLETED;
339 /* SCSI eh reuses commands to verify us */ 337 task->sc = NULL;
340 sc->SCp.ptr = NULL; 338
341 if (conn->ctask == ctask) 339 if (conn->task == task)
342 conn->ctask = NULL; 340 conn->task = NULL;
343 list_del_init(&ctask->running); 341 /*
344 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); 342 * login task is preallocated so do not free
345 sc->scsi_done(sc); 343 */
344 if (conn->login_task == task)
345 return;
346
347 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
348
349 if (conn->ping_task == task)
350 conn->ping_task = NULL;
351
352 if (sc) {
353 task->sc = NULL;
354 /* SCSI eh reuses commands to verify us */
355 sc->SCp.ptr = NULL;
356 /*
357 * queue command may call this to free the task, but
358 * not have setup the sc callback
359 */
360 if (sc->scsi_done)
361 sc->scsi_done(sc);
362 }
363}
364
365void __iscsi_get_task(struct iscsi_task *task)
366{
367 atomic_inc(&task->refcount);
346} 368}
369EXPORT_SYMBOL_GPL(__iscsi_get_task);
347 370
348static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask) 371static void __iscsi_put_task(struct iscsi_task *task)
349{ 372{
350 atomic_inc(&ctask->refcount); 373 if (atomic_dec_and_test(&task->refcount))
374 iscsi_complete_command(task);
351} 375}
352 376
353static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask) 377void iscsi_put_task(struct iscsi_task *task)
354{ 378{
355 if (atomic_dec_and_test(&ctask->refcount)) 379 struct iscsi_session *session = task->conn->session;
356 iscsi_complete_command(ctask); 380
381 spin_lock_bh(&session->lock);
382 __iscsi_put_task(task);
383 spin_unlock_bh(&session->lock);
357} 384}
385EXPORT_SYMBOL_GPL(iscsi_put_task);
358 386
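
Task lifetime is now a plain refcount: queuecommand takes the initial reference, transmit paths take a temporary one, and whoever drops the count to zero runs iscsi_complete_command(). The shape of it with C11 atomics (a model, not the kernel's atomic_t API):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct task {
		atomic_int refcount;
		/* ... */
	};

	/* mirrors __iscsi_get_task()/__iscsi_put_task(): last put completes */
	static void get_task(struct task *t)
	{
		atomic_fetch_add(&t->refcount, 1);
	}

	static void put_task(struct task *t)
	{
		if (atomic_fetch_sub(&t->refcount, 1) == 1) {
			printf("last ref dropped: complete, return to pool\n");
			free(t);
		}
	}

	int main(void)
	{
		struct task *t = calloc(1, sizeof(*t));

		atomic_store(&t->refcount, 1);	/* queuecommand's initial ref */
		get_task(t);			/* held across xmit_task() */
		put_task(t);			/* xmit done */
		put_task(t);			/* response processed: frees */
		return 0;
	}
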
359/* 387/*
360 * session lock must be held 388 * session lock must be held
361 */ 389 */
362static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 390static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
363 int err) 391 int err)
364{ 392{
365 struct scsi_cmnd *sc; 393 struct scsi_cmnd *sc;
366 394
367 sc = ctask->sc; 395 sc = task->sc;
368 if (!sc) 396 if (!sc)
369 return; 397 return;
370 398
371 if (ctask->state == ISCSI_TASK_PENDING) 399 if (task->state == ISCSI_TASK_PENDING)
372 /* 400 /*
373 * cmd never made it to the xmit thread, so we should not count 401 * cmd never made it to the xmit thread, so we should not count
374 * the cmd in the sequencing 402 * the cmd in the sequencing
375 */ 403 */
376 conn->session->queued_cmdsn--; 404 conn->session->queued_cmdsn--;
377 else 405 else
378 conn->session->tt->cleanup_cmd_task(conn, ctask); 406 conn->session->tt->cleanup_task(conn, task);
407 /*
 408 * Check whether cleanup_task dropped the lock and the command completed.
409 */
410 if (!task->sc)
411 return;
379 412
380 sc->result = err; 413 sc->result = err;
381 if (!scsi_bidi_cmnd(sc)) 414 if (!scsi_bidi_cmnd(sc))
@@ -384,39 +417,63 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
384 scsi_out(sc)->resid = scsi_out(sc)->length; 417 scsi_out(sc)->resid = scsi_out(sc)->length;
385 scsi_in(sc)->resid = scsi_in(sc)->length; 418 scsi_in(sc)->resid = scsi_in(sc)->length;
386 } 419 }
387 if (conn->ctask == ctask) 420
388 conn->ctask = NULL; 421 if (conn->task == task)
422 conn->task = NULL;
389 /* release ref from queuecommand */ 423 /* release ref from queuecommand */
390 __iscsi_put_ctask(ctask); 424 __iscsi_put_task(task);
391} 425}
392 426
393/** 427static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
394 * iscsi_free_mgmt_task - return mgmt task back to pool 428 struct iscsi_task *task)
395 * @conn: iscsi connection
396 * @mtask: mtask
397 *
398 * Must be called with session lock.
399 */
400void iscsi_free_mgmt_task(struct iscsi_conn *conn,
401 struct iscsi_mgmt_task *mtask)
402{ 429{
403 list_del_init(&mtask->running); 430 struct iscsi_session *session = conn->session;
404 if (conn->login_mtask == mtask) 431 struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
405 return; 432 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
433
434 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
435 return -ENOTCONN;
436
437 if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
438 hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
439 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
440 /*
441 * pre-format CmdSN for outgoing PDU.
442 */
443 nop->cmdsn = cpu_to_be32(session->cmdsn);
444 if (hdr->itt != RESERVED_ITT) {
445 hdr->itt = build_itt(task->itt, session->age);
446 /*
447 * TODO: We always use immediate, so we never hit this.
448 * If we start to send tmfs or nops as non-immediate then
449 * we should start checking the cmdsn numbers for mgmt tasks.
450 */
451 if (conn->c_stage == ISCSI_CONN_STARTED &&
452 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
453 session->queued_cmdsn++;
454 session->cmdsn++;
455 }
456 }
406 457
407 if (conn->ping_mtask == mtask) 458 if (session->tt->init_task)
408 conn->ping_mtask = NULL; 459 session->tt->init_task(task);
409 __kfifo_put(conn->session->mgmtpool.queue, 460
410 (void*)&mtask, sizeof(void*)); 461 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
462 session->state = ISCSI_STATE_LOGGING_OUT;
463
464 list_move_tail(&task->running, &conn->mgmt_run_list);
465 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
466 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
467 task->data_count);
468 return 0;
411} 469}
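
The TODO above matters because, per RFC 3720, an immediate PDU carries the current CmdSN but does not advance it; only non-immediate PDUs consume a sequence number. A sketch of that rule (the I-bit value is from iscsi_proto.h; the non-immediate case is hypothetical here since this stack always sends mgmt PDUs immediate):

	#include <stdio.h>

	#define ISCSI_OP_IMMEDIATE 0x40	/* the I bit */

	/* mirrors the cmdsn handling in iscsi_prep_mgmt_task() */
	static unsigned prep_mgmt(unsigned opcode, unsigned *cmdsn)
	{
		unsigned pdu_cmdsn = *cmdsn;	/* pre-format CmdSN for the PDU */

		if (!(opcode & ISCSI_OP_IMMEDIATE))
			(*cmdsn)++;		/* non-immediate consumes a CmdSN */
		return pdu_cmdsn;
	}

	int main(void)
	{
		unsigned cmdsn = 10;

		prep_mgmt(0x00 | ISCSI_OP_IMMEDIATE, &cmdsn); /* nop-out: stays 10 */
		prep_mgmt(0x02, &cmdsn);	/* TMF sent non-immediate */
		printf("cmdsn now %u\n", cmdsn);	/* 11 */
		return 0;
	}
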
412EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
413 470
414static struct iscsi_mgmt_task * 471static struct iscsi_task *
415__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 472__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
416 char *data, uint32_t data_size) 473 char *data, uint32_t data_size)
417{ 474{
418 struct iscsi_session *session = conn->session; 475 struct iscsi_session *session = conn->session;
419 struct iscsi_mgmt_task *mtask; 476 struct iscsi_task *task;
420 477
421 if (session->state == ISCSI_STATE_TERMINATE) 478 if (session->state == ISCSI_STATE_TERMINATE)
422 return NULL; 479 return NULL;
@@ -426,29 +483,56 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
426 /* 483 /*
427 * Login and Text are sent serially, in 484 * Login and Text are sent serially, in
428 * request-followed-by-response sequence. 485 * request-followed-by-response sequence.
429 * Same mtask can be used. Same ITT must be used. 486 * Same task can be used. Same ITT must be used.
430 * Note that login_mtask is preallocated at conn_create(). 487 * Note that login_task is preallocated at conn_create().
431 */ 488 */
432 mtask = conn->login_mtask; 489 task = conn->login_task;
433 else { 490 else {
434 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 491 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
435 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 492 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
436 493
437 if (!__kfifo_get(session->mgmtpool.queue, 494 if (!__kfifo_get(session->cmdpool.queue,
438 (void*)&mtask, sizeof(void*))) 495 (void*)&task, sizeof(void*)))
439 return NULL; 496 return NULL;
497
498 if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
499 hdr->ttt == RESERVED_ITT) {
500 conn->ping_task = task;
501 conn->last_ping = jiffies;
502 }
440 } 503 }
504 /*
505 * released in complete pdu for task we expect a response for, and
506 * released by the lld when it has transmitted the task for
507 * pdus we do not expect a response for.
508 */
509 atomic_set(&task->refcount, 1);
510 task->conn = conn;
511 task->sc = NULL;
441 512
442 if (data_size) { 513 if (data_size) {
443 memcpy(mtask->data, data, data_size); 514 memcpy(task->data, data, data_size);
444 mtask->data_count = data_size; 515 task->data_count = data_size;
516 } else
517 task->data_count = 0;
518
519 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
520 INIT_LIST_HEAD(&task->running);
521 list_add_tail(&task->running, &conn->mgmtqueue);
522
523 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
524 if (iscsi_prep_mgmt_task(conn, task)) {
525 __iscsi_put_task(task);
526 return NULL;
527 }
528
529 if (session->tt->xmit_task(task))
530 task = NULL;
531
445 } else 532 } else
446 mtask->data_count = 0; 533 scsi_queue_work(conn->session->host, &conn->xmitwork);
447 534
448 memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr)); 535 return task;
449 INIT_LIST_HEAD(&mtask->running);
450 list_add_tail(&mtask->running, &conn->mgmtqueue);
451 return mtask;
452} 536}
453 537
454int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 538int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -462,7 +546,6 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
462 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size)) 546 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
463 err = -EPERM; 547 err = -EPERM;
464 spin_unlock_bh(&session->lock); 548 spin_unlock_bh(&session->lock);
465 scsi_queue_work(session->host, &conn->xmitwork);
466 return err; 549 return err;
467} 550}
468EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); 551EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
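
Note the dropped scsi_queue_work() here: __iscsi_conn_send_pdu() now chooses the transmit path itself, sending inline for offload HBAs that advertise CAP_DATA_PATH_OFFLOAD and deferring to the xmit workqueue for software transports like iscsi_tcp. Schematically (the bit value is illustrative, not the real capability flag):

	#include <stdio.h>

	#define CAP_DATA_PATH_OFFLOAD (1u << 7)	/* illustrative bit value */

	static int xmit_task(void)   { puts("xmit_task: sent inline"); return 0; }
	static void queue_work(void) { puts("xmitwork queued for tx thread"); }

	static void send_mgmt_pdu(unsigned caps)
	{
		if (caps & CAP_DATA_PATH_OFFLOAD)
			xmit_task();	/* offload: prep + send in caller's context */
		else
			queue_work();	/* software: workqueue drains mgmtqueue */
	}

	int main(void)
	{
		send_mgmt_pdu(0);                     /* iscsi_tcp */
		send_mgmt_pdu(CAP_DATA_PATH_OFFLOAD); /* offload HBA */
		return 0;
	}
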
@@ -471,7 +554,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
471 * iscsi_cmd_rsp - SCSI Command Response processing 554 * iscsi_cmd_rsp - SCSI Command Response processing
472 * @conn: iscsi connection 555 * @conn: iscsi connection
473 * @hdr: iscsi header 556 * @hdr: iscsi header
474 * @ctask: scsi command task 557 * @task: scsi command task
475 * @data: cmd data buffer 558 * @data: cmd data buffer
476 * @datalen: len of buffer 559 * @datalen: len of buffer
477 * 560 *
@@ -479,12 +562,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
479 * then completes the command and task. 562 * then completes the command and task.
480 **/ 563 **/
481static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 564static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
482 struct iscsi_cmd_task *ctask, char *data, 565 struct iscsi_task *task, char *data,
483 int datalen) 566 int datalen)
484{ 567{
485 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr; 568 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
486 struct iscsi_session *session = conn->session; 569 struct iscsi_session *session = conn->session;
487 struct scsi_cmnd *sc = ctask->sc; 570 struct scsi_cmnd *sc = task->sc;
488 571
489 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); 572 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
490 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 573 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
@@ -508,7 +591,7 @@ invalid_datalen:
508 goto out; 591 goto out;
509 } 592 }
510 593
511 senselen = be16_to_cpu(get_unaligned((__be16 *) data)); 594 senselen = get_unaligned_be16(data);
512 if (datalen < senselen) 595 if (datalen < senselen)
513 goto invalid_datalen; 596 goto invalid_datalen;
514 597
@@ -544,10 +627,10 @@ invalid_datalen:
544 } 627 }
545out: 628out:
546 debug_scsi("done [sc %lx res %d itt 0x%x]\n", 629 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
547 (long)sc, sc->result, ctask->itt); 630 (long)sc, sc->result, task->itt);
548 conn->scsirsp_pdus_cnt++; 631 conn->scsirsp_pdus_cnt++;
549 632
550 __iscsi_put_ctask(ctask); 633 __iscsi_put_task(task);
551} 634}
552 635
553static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 636static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -572,9 +655,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
572static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) 655static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
573{ 656{
574 struct iscsi_nopout hdr; 657 struct iscsi_nopout hdr;
575 struct iscsi_mgmt_task *mtask; 658 struct iscsi_task *task;
576 659
577 if (!rhdr && conn->ping_mtask) 660 if (!rhdr && conn->ping_task)
578 return; 661 return;
579 662
580 memset(&hdr, 0, sizeof(struct iscsi_nopout)); 663 memset(&hdr, 0, sizeof(struct iscsi_nopout));
@@ -588,18 +671,9 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
588 } else 671 } else
589 hdr.ttt = RESERVED_ITT; 672 hdr.ttt = RESERVED_ITT;
590 673
591 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); 674 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
592 if (!mtask) { 675 if (!task)
593 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); 676 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
594 return;
595 }
596
597 /* only track our nops */
598 if (!rhdr) {
599 conn->ping_mtask = mtask;
600 conn->last_ping = jiffies;
601 }
602 scsi_queue_work(conn->session->host, &conn->xmitwork);
603} 677}
604 678
605static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 679static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -628,6 +702,31 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
628} 702}
629 703
630/** 704/**
705 * iscsi_itt_to_task - look up task by itt
706 * @conn: iscsi connection
707 * @itt: itt
708 *
709 * This should be used for mgmt tasks like login and nops, or if
 710 * the LLD's itt space does not include the session age.
711 *
712 * The session lock must be held.
713 */
714static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
715{
716 struct iscsi_session *session = conn->session;
717 uint32_t i;
718
719 if (itt == RESERVED_ITT)
720 return NULL;
721
722 i = get_itt(itt);
723 if (i >= session->cmds_max)
724 return NULL;
725
726 return session->cmds[i];
727}
728
729/**
631 * __iscsi_complete_pdu - complete pdu 730 * __iscsi_complete_pdu - complete pdu
632 * @conn: iscsi conn 731 * @conn: iscsi conn
633 * @hdr: iscsi header 732 * @hdr: iscsi header
@@ -638,108 +737,28 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
638 * queuecommand or send generic. session lock must be held and verify 737 * queuecommand or send generic. session lock must be held and verify
639 * itt must have been called. 738 * itt must have been called.
640 */ 739 */
641static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 740int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
642 char *data, int datalen) 741 char *data, int datalen)
643{ 742{
644 struct iscsi_session *session = conn->session; 743 struct iscsi_session *session = conn->session;
645 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0; 744 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
646 struct iscsi_cmd_task *ctask; 745 struct iscsi_task *task;
647 struct iscsi_mgmt_task *mtask;
648 uint32_t itt; 746 uint32_t itt;
649 747
650 conn->last_recv = jiffies; 748 conn->last_recv = jiffies;
749 rc = iscsi_verify_itt(conn, hdr->itt);
750 if (rc)
751 return rc;
752
651 if (hdr->itt != RESERVED_ITT) 753 if (hdr->itt != RESERVED_ITT)
652 itt = get_itt(hdr->itt); 754 itt = get_itt(hdr->itt);
653 else 755 else
654 itt = ~0U; 756 itt = ~0U;
655 757
656 if (itt < session->cmds_max) { 758 debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
657 ctask = session->cmds[itt]; 759 opcode, conn->id, itt, datalen);
658
659 debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
660 opcode, conn->id, ctask->itt, datalen);
661
662 switch(opcode) {
663 case ISCSI_OP_SCSI_CMD_RSP:
664 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
665 iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
666 datalen);
667 break;
668 case ISCSI_OP_SCSI_DATA_IN:
669 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
670 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
671 conn->scsirsp_pdus_cnt++;
672 __iscsi_put_ctask(ctask);
673 }
674 break;
675 case ISCSI_OP_R2T:
676 /* LLD handles this for now */
677 break;
678 default:
679 rc = ISCSI_ERR_BAD_OPCODE;
680 break;
681 }
682 } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
683 itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
684 mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
685
686 debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
687 opcode, conn->id, mtask->itt, datalen);
688 760
689 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 761 if (itt == ~0U) {
690 switch(opcode) {
691 case ISCSI_OP_LOGOUT_RSP:
692 if (datalen) {
693 rc = ISCSI_ERR_PROTO;
694 break;
695 }
696 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
697 /* fall through */
698 case ISCSI_OP_LOGIN_RSP:
699 case ISCSI_OP_TEXT_RSP:
700 /*
701 * login related PDU's exp_statsn is handled in
702 * userspace
703 */
704 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
705 rc = ISCSI_ERR_CONN_FAILED;
706 iscsi_free_mgmt_task(conn, mtask);
707 break;
708 case ISCSI_OP_SCSI_TMFUNC_RSP:
709 if (datalen) {
710 rc = ISCSI_ERR_PROTO;
711 break;
712 }
713
714 iscsi_tmf_rsp(conn, hdr);
715 iscsi_free_mgmt_task(conn, mtask);
716 break;
717 case ISCSI_OP_NOOP_IN:
718 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
719 datalen) {
720 rc = ISCSI_ERR_PROTO;
721 break;
722 }
723 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
724
725 if (conn->ping_mtask != mtask) {
726 /*
727 * If this is not in response to one of our
728 * nops then it must be from userspace.
729 */
730 if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
731 datalen))
732 rc = ISCSI_ERR_CONN_FAILED;
733 } else
734 mod_timer(&conn->transport_timer,
735 jiffies + conn->recv_timeout);
736 iscsi_free_mgmt_task(conn, mtask);
737 break;
738 default:
739 rc = ISCSI_ERR_BAD_OPCODE;
740 break;
741 }
742 } else if (itt == ~0U) {
743 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 762 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
744 763
745 switch(opcode) { 764 switch(opcode) {
@@ -766,11 +785,104 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
766 rc = ISCSI_ERR_BAD_OPCODE; 785 rc = ISCSI_ERR_BAD_OPCODE;
767 break; 786 break;
768 } 787 }
769 } else 788 goto out;
770 rc = ISCSI_ERR_BAD_ITT; 789 }
771 790
791 switch(opcode) {
792 case ISCSI_OP_SCSI_CMD_RSP:
793 case ISCSI_OP_SCSI_DATA_IN:
794 task = iscsi_itt_to_ctask(conn, hdr->itt);
795 if (!task)
796 return ISCSI_ERR_BAD_ITT;
797 break;
798 case ISCSI_OP_R2T:
799 /*
800 * LLD handles R2Ts if they need to.
801 */
802 return 0;
803 case ISCSI_OP_LOGOUT_RSP:
804 case ISCSI_OP_LOGIN_RSP:
805 case ISCSI_OP_TEXT_RSP:
806 case ISCSI_OP_SCSI_TMFUNC_RSP:
807 case ISCSI_OP_NOOP_IN:
808 task = iscsi_itt_to_task(conn, hdr->itt);
809 if (!task)
810 return ISCSI_ERR_BAD_ITT;
811 break;
812 default:
813 return ISCSI_ERR_BAD_OPCODE;
814 }
815
816 switch(opcode) {
817 case ISCSI_OP_SCSI_CMD_RSP:
818 iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
819 break;
820 case ISCSI_OP_SCSI_DATA_IN:
821 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
822 conn->scsirsp_pdus_cnt++;
823 iscsi_update_cmdsn(session,
824 (struct iscsi_nopin*) hdr);
825 __iscsi_put_task(task);
826 }
827 break;
828 case ISCSI_OP_LOGOUT_RSP:
829 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
830 if (datalen) {
831 rc = ISCSI_ERR_PROTO;
832 break;
833 }
834 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
835 goto recv_pdu;
836 case ISCSI_OP_LOGIN_RSP:
837 case ISCSI_OP_TEXT_RSP:
838 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
839 /*
840 * login related PDU's exp_statsn is handled in
841 * userspace
842 */
843 goto recv_pdu;
844 case ISCSI_OP_SCSI_TMFUNC_RSP:
845 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
846 if (datalen) {
847 rc = ISCSI_ERR_PROTO;
848 break;
849 }
850
851 iscsi_tmf_rsp(conn, hdr);
852 __iscsi_put_task(task);
853 break;
854 case ISCSI_OP_NOOP_IN:
855 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
856 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
857 rc = ISCSI_ERR_PROTO;
858 break;
859 }
860 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
861
862 if (conn->ping_task != task)
863 /*
864 * If this is not in response to one of our
865 * nops then it must be from userspace.
866 */
867 goto recv_pdu;
868
869 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
870 __iscsi_put_task(task);
871 break;
872 default:
873 rc = ISCSI_ERR_BAD_OPCODE;
874 break;
875 }
876
877out:
878 return rc;
879recv_pdu:
880 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
881 rc = ISCSI_ERR_CONN_FAILED;
882 __iscsi_put_task(task);
772 return rc; 883 return rc;
773} 884}
885EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
774 886
775int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 887int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
776 char *data, int datalen) 888 char *data, int datalen)
@@ -784,51 +896,63 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
784} 896}
785EXPORT_SYMBOL_GPL(iscsi_complete_pdu); 897EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
786 898
787/* verify itt (itt encoding: age+cid+itt) */ 899int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
788int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
789 uint32_t *ret_itt)
790{ 900{
791 struct iscsi_session *session = conn->session; 901 struct iscsi_session *session = conn->session;
792 struct iscsi_cmd_task *ctask; 902 uint32_t i;
793 uint32_t itt;
794 903
795 if (hdr->itt != RESERVED_ITT) { 904 if (itt == RESERVED_ITT)
796 if (((__force u32)hdr->itt & ISCSI_AGE_MASK) != 905 return 0;
797 (session->age << ISCSI_AGE_SHIFT)) {
798 iscsi_conn_printk(KERN_ERR, conn,
799 "received itt %x expected session "
800 "age (%x)\n", (__force u32)hdr->itt,
801 session->age & ISCSI_AGE_MASK);
802 return ISCSI_ERR_BAD_ITT;
803 }
804 906
805 itt = get_itt(hdr->itt); 907 if (((__force u32)itt & ISCSI_AGE_MASK) !=
806 } else 908 (session->age << ISCSI_AGE_SHIFT)) {
807 itt = ~0U; 909 iscsi_conn_printk(KERN_ERR, conn,
910 "received itt %x expected session age (%x)\n",
911 (__force u32)itt, session->age);
912 return ISCSI_ERR_BAD_ITT;
913 }
808 914
809 if (itt < session->cmds_max) { 915 i = get_itt(itt);
810 ctask = session->cmds[itt]; 916 if (i >= session->cmds_max) {
917 iscsi_conn_printk(KERN_ERR, conn,
918 "received invalid itt index %u (max cmds "
919 "%u.\n", i, session->cmds_max);
920 return ISCSI_ERR_BAD_ITT;
921 }
922 return 0;
923}
924EXPORT_SYMBOL_GPL(iscsi_verify_itt);
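
The age check works because the itt is a composite: the low bits index session->cmds[] and a few high bits carry the session age, which is bumped on relogin so responses to pre-reset tasks fail verification. With the usual layout (masks assumed here; the real definitions are in include/scsi/libiscsi.h):

	#include <assert.h>
	#include <stdint.h>

	/* layout assumed: low bits = task index, top nibble = session age */
	#define ISCSI_AGE_SHIFT 28
	#define ISCSI_AGE_MASK  (0xfu << ISCSI_AGE_SHIFT)
	#define ISCSI_ITT_MASK  0x0fffffffu

	static uint32_t build_itt(uint32_t idx, uint32_t age)
	{
		return idx | (age << ISCSI_AGE_SHIFT);
	}

	int main(void)
	{
		uint32_t session_age = 3, itt = build_itt(42, session_age);

		/* iscsi_verify_itt(): a stale itt from before relogin fails */
		assert((itt & ISCSI_AGE_MASK) ==
		       (session_age << ISCSI_AGE_SHIFT));
		/* iscsi_itt_to_task(): index back into session->cmds[] */
		assert((itt & ISCSI_ITT_MASK) == 42);
		return 0;
	}
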
811 925
812 if (!ctask->sc) { 926/**
813 iscsi_conn_printk(KERN_INFO, conn, "dropping ctask " 927 * iscsi_itt_to_ctask - look up ctask by itt
814 "with itt 0x%x\n", ctask->itt); 928 * @conn: iscsi connection
815 /* force drop */ 929 * @itt: itt
816 return ISCSI_ERR_NO_SCSI_CMD; 930 *
817 } 931 * This should be used for cmd tasks.
932 *
933 * The session lock must be held.
934 */
935struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
936{
937 struct iscsi_task *task;
818 938
819 if (ctask->sc->SCp.phase != session->age) { 939 if (iscsi_verify_itt(conn, itt))
820 iscsi_conn_printk(KERN_ERR, conn, 940 return NULL;
821 "iscsi: ctask's session age %d, " 941
822 "expected %d\n", ctask->sc->SCp.phase, 942 task = iscsi_itt_to_task(conn, itt);
823 session->age); 943 if (!task || !task->sc)
824 return ISCSI_ERR_SESSION_FAILED; 944 return NULL;
825 } 945
946 if (task->sc->SCp.phase != conn->session->age) {
947 iscsi_session_printk(KERN_ERR, conn->session,
948 "task's session age %d, expected %d\n",
949 task->sc->SCp.phase, conn->session->age);
950 return NULL;
826 } 951 }
827 952
828 *ret_itt = itt; 953 return task;
829 return 0;
830} 954}
831EXPORT_SYMBOL_GPL(iscsi_verify_itt); 955EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
832 956
833void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) 957void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
834{ 958{
@@ -850,61 +974,6 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
 
-static void iscsi_prep_mtask(struct iscsi_conn *conn,
-			     struct iscsi_mgmt_task *mtask)
-{
-	struct iscsi_session *session = conn->session;
-	struct iscsi_hdr *hdr = mtask->hdr;
-	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
-
-	if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
-	    hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
-		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
-	/*
-	 * pre-format CmdSN for outgoing PDU.
-	 */
-	nop->cmdsn = cpu_to_be32(session->cmdsn);
-	if (hdr->itt != RESERVED_ITT) {
-		hdr->itt = build_itt(mtask->itt, session->age);
-		/*
-		 * TODO: We always use immediate, so we never hit this.
-		 * If we start to send tmfs or nops as non-immediate then
-		 * we should start checking the cmdsn numbers for mgmt tasks.
-		 */
-		if (conn->c_stage == ISCSI_CONN_STARTED &&
-		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
-			session->queued_cmdsn++;
-			session->cmdsn++;
-		}
-	}
-
-	if (session->tt->init_mgmt_task)
-		session->tt->init_mgmt_task(conn, mtask);
-
-	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
-		   hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
-		   mtask->data_count);
-}
-
-static int iscsi_xmit_mtask(struct iscsi_conn *conn)
-{
-	struct iscsi_hdr *hdr = conn->mtask->hdr;
-	int rc;
-
-	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
-		conn->session->state = ISCSI_STATE_LOGGING_OUT;
-	spin_unlock_bh(&conn->session->lock);
-
-	rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
-	spin_lock_bh(&conn->session->lock);
-	if (rc)
-		return rc;
-
-	/* done with this in-progress mtask */
-	conn->mtask = NULL;
-	return 0;
-}
-
 static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
 {
 	struct iscsi_session *session = conn->session;
@@ -922,37 +991,38 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
 	return 0;
 }
 
-static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+static int iscsi_xmit_task(struct iscsi_conn *conn)
 {
-	struct iscsi_cmd_task *ctask = conn->ctask;
+	struct iscsi_task *task = conn->task;
 	int rc;
 
-	__iscsi_get_ctask(ctask);
+	__iscsi_get_task(task);
 	spin_unlock_bh(&conn->session->lock);
-	rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+	rc = conn->session->tt->xmit_task(task);
 	spin_lock_bh(&conn->session->lock);
-	__iscsi_put_ctask(ctask);
+	__iscsi_put_task(task);
 	if (!rc)
-		/* done with this ctask */
-		conn->ctask = NULL;
+		/* done with this task */
+		conn->task = NULL;
 	return rc;
 }
 
 /**
- * iscsi_requeue_ctask - requeue ctask to run from session workqueue
- * @ctask: ctask to requeue
+ * iscsi_requeue_task - requeue task to run from session workqueue
+ * @task: task to requeue
  *
- * LLDs that need to run a ctask from the session workqueue should call
- * this. The session lock must be held.
+ * LLDs that need to run a task from the session workqueue should call
+ * this. The session lock must be held. This should only be called
+ * by software drivers.
  */
-void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+void iscsi_requeue_task(struct iscsi_task *task)
 {
-	struct iscsi_conn *conn = ctask->conn;
+	struct iscsi_conn *conn = task->conn;
 
-	list_move_tail(&ctask->running, &conn->requeue);
+	list_move_tail(&task->running, &conn->requeue);
 	scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
-EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+EXPORT_SYMBOL_GPL(iscsi_requeue_task);
 
 /**
  * iscsi_data_xmit - xmit any command into the scheduled connection
@@ -974,14 +1044,8 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 		return -ENODATA;
 	}
 
-	if (conn->ctask) {
-		rc = iscsi_xmit_ctask(conn);
-		if (rc)
-			goto again;
-	}
-
-	if (conn->mtask) {
-		rc = iscsi_xmit_mtask(conn);
+	if (conn->task) {
+		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto again;
 	}
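
iscsi_xmit_task() above pins the task with __iscsi_get_task() before dropping the session lock, so a completion arriving from the network while the transmit runs unlocked cannot free the task mid-use. A minimal userspace model of that get/put discipline (the refcount layout and free path are illustrative, not libiscsi's):

/* Model of the pin-across-unlock pattern, using C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
	atomic_int refcount;
};

static void task_get(struct task *t) { atomic_fetch_add(&t->refcount, 1); }

static void task_put(struct task *t)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&t->refcount, 1) == 1) {
		printf("last reference dropped, freeing task\n");
		free(t);
	}
}

int main(void)
{
	struct task *t = malloc(sizeof(*t));
	atomic_init(&t->refcount, 1);	/* queuecommand's reference */

	task_get(t);	/* pin before the lock is dropped for xmit */
	/* ...unlocked window: transmit runs, a completion may task_put()... */
	task_put(t);	/* simulated completion's put; task survives */
	task_put(t);	/* xmit path's put after relocking; frees here */
	return 0;
}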
@@ -993,17 +1057,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
  */
 check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
-		conn->mtask = list_entry(conn->mgmtqueue.next,
-					 struct iscsi_mgmt_task, running);
-		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
-			iscsi_free_mgmt_task(conn, conn->mtask);
-			conn->mtask = NULL;
+		conn->task = list_entry(conn->mgmtqueue.next,
+					 struct iscsi_task, running);
+		if (iscsi_prep_mgmt_task(conn, conn->task)) {
+			__iscsi_put_task(conn->task);
+			conn->task = NULL;
 			continue;
 		}
-
-		iscsi_prep_mtask(conn, conn->mtask);
-		list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
-		rc = iscsi_xmit_mtask(conn);
+		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto again;
 	}
@@ -1013,24 +1074,21 @@ check_mgmt:
 		if (conn->tmf_state == TMF_QUEUED)
 			break;
 
-		conn->ctask = list_entry(conn->xmitqueue.next,
-					 struct iscsi_cmd_task, running);
+		conn->task = list_entry(conn->xmitqueue.next,
+					 struct iscsi_task, running);
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
-			fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+			fail_command(conn, conn->task, DID_IMM_RETRY << 16);
 			continue;
 		}
-		if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
-			fail_command(conn, conn->ctask, DID_ABORT << 16);
+		if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+			fail_command(conn, conn->task, DID_ABORT << 16);
 			continue;
 		}
-
-		conn->ctask->state = ISCSI_TASK_RUNNING;
-		list_move_tail(conn->xmitqueue.next, &conn->run_list);
-		rc = iscsi_xmit_ctask(conn);
+		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto again;
 		/*
-		 * we could continuously get new ctask requests so
+		 * we could continuously get new task requests so
 		 * we need to check the mgmt queue for nops that need to
 		 * be sent to avoid starvation
 		 */
@@ -1048,11 +1106,11 @@ check_mgmt:
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
 			break;
 
-		conn->ctask = list_entry(conn->requeue.next,
-					 struct iscsi_cmd_task, running);
-		conn->ctask->state = ISCSI_TASK_RUNNING;
+		conn->task = list_entry(conn->requeue.next,
+					 struct iscsi_task, running);
+		conn->task->state = ISCSI_TASK_RUNNING;
 		list_move_tail(conn->requeue.next, &conn->run_list);
-		rc = iscsi_xmit_ctask(conn);
+		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto again;
 		if (!list_empty(&conn->mgmtqueue))
@@ -1096,11 +1154,12 @@ enum {
 
 int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
+	struct iscsi_cls_session *cls_session;
 	struct Scsi_Host *host;
 	int reason = 0;
 	struct iscsi_session *session;
 	struct iscsi_conn *conn;
-	struct iscsi_cmd_task *ctask = NULL;
+	struct iscsi_task *task = NULL;
 
 	sc->scsi_done = done;
 	sc->result = 0;
@@ -1109,10 +1168,11 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	host = sc->device->host;
 	spin_unlock(host->host_lock);
 
-	session = iscsi_hostdata(host->hostdata);
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
 	spin_lock(&session->lock);
 
-	reason = iscsi_session_chkready(session_to_cls(session));
+	reason = iscsi_session_chkready(cls_session);
 	if (reason) {
 		sc->result = reason;
 		goto fault;
@@ -1167,26 +1227,39 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		goto reject;
 	}
 
-	if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+	if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
 			 sizeof(void*))) {
 		reason = FAILURE_OOM;
 		goto reject;
 	}
-	session->queued_cmdsn++;
-
 	sc->SCp.phase = session->age;
-	sc->SCp.ptr = (char *)ctask;
+	sc->SCp.ptr = (char *)task;
 
-	atomic_set(&ctask->refcount, 1);
-	ctask->state = ISCSI_TASK_PENDING;
-	ctask->conn = conn;
-	ctask->sc = sc;
-	INIT_LIST_HEAD(&ctask->running);
+	atomic_set(&task->refcount, 1);
+	task->state = ISCSI_TASK_PENDING;
+	task->conn = conn;
+	task->sc = sc;
+	INIT_LIST_HEAD(&task->running);
+	list_add_tail(&task->running, &conn->xmitqueue);
+
+	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+		if (iscsi_prep_scsi_cmd_pdu(task)) {
+			sc->result = DID_ABORT << 16;
+			sc->scsi_done = NULL;
+			iscsi_complete_command(task);
+			goto fault;
+		}
+		if (session->tt->xmit_task(task)) {
+			sc->scsi_done = NULL;
+			iscsi_complete_command(task);
+			reason = FAILURE_SESSION_NOT_READY;
+			goto reject;
+		}
+	} else
+		scsi_queue_work(session->host, &conn->xmitwork);
 
-	list_add_tail(&ctask->running, &conn->xmitqueue);
+	session->queued_cmdsn++;
 	spin_unlock(&session->lock);
-
-	scsi_queue_work(host, &conn->xmitwork);
 	spin_lock(host->host_lock);
 	return 0;
 
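
The new queuecommand branch gives offload HBAs an inline submit path while software transports keep deferring to the per-connection workqueue. A compressed sketch of that split, with invented types standing in for the transport template:

/* Dispatch-path sketch; CAP_DATA_PATH_OFFLOAD mirrors the capability
 * bit used above, everything else is a made-up stand-in. */
#include <stdio.h>

#define CAP_DATA_PATH_OFFLOAD (1 << 0)

struct transport {
	unsigned caps;
	int (*xmit_task)(int task_id);
};

static int hw_xmit(int task_id)
{
	printf("offload hw queues task %d immediately\n", task_id);
	return 0;
}

static void queue_xmit_work(int task_id)
{
	printf("task %d left on xmitqueue for the workqueue\n", task_id);
}

static int queuecommand(struct transport *tt, int task_id)
{
	if (tt->caps & CAP_DATA_PATH_OFFLOAD)
		return tt->xmit_task(task_id);	/* failure -> complete/reject */
	queue_xmit_work(task_id);		/* software path: wake xmitwork */
	return 0;
}

int main(void)
{
	struct transport hw = { CAP_DATA_PATH_OFFLOAD, hw_xmit };
	struct transport sw = { 0, NULL };	/* never dereferenced */
	queuecommand(&hw, 1);
	queuecommand(&sw, 2);
	return 0;
}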
@@ -1205,7 +1278,7 @@ fault:
 		scsi_out(sc)->resid = scsi_out(sc)->length;
 		scsi_in(sc)->resid = scsi_in(sc)->length;
 	}
-	sc->scsi_done(sc);
+	done(sc);
 	spin_lock(host->host_lock);
 	return 0;
 }
@@ -1222,7 +1295,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
 
 void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
 {
-	struct iscsi_session *session = class_to_transport_session(cls_session);
+	struct iscsi_session *session = cls_session->dd_data;
 
 	spin_lock_bh(&session->lock);
 	if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -1236,9 +1309,13 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
 
 int iscsi_eh_host_reset(struct scsi_cmnd *sc)
 {
-	struct Scsi_Host *host = sc->device->host;
-	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
-	struct iscsi_conn *conn = session->leadconn;
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *session;
+	struct iscsi_conn *conn;
+
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+	conn = session->leadconn;
 
 	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->lock);
@@ -1300,11 +1377,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 				   int timeout)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_mgmt_task *mtask;
+	struct iscsi_task *task;
 
-	mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
 				      NULL, 0);
-	if (!mtask) {
+	if (!task) {
 		spin_unlock_bh(&session->lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 		spin_lock_bh(&session->lock);
@@ -1320,7 +1397,6 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 
 	spin_unlock_bh(&session->lock);
 	mutex_unlock(&session->eh_mutex);
-	scsi_queue_work(session->host, &conn->xmitwork);
 
 	/*
 	 * block eh thread until:
@@ -1339,7 +1415,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 
 	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->lock);
-	/* if the session drops it will clean up the mtask */
+	/* if the session drops it will clean up the task */
 	if (age != session->age ||
 	    session->state != ISCSI_STATE_LOGGED_IN)
 		return -ENOTCONN;
@@ -1353,48 +1429,51 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
 			      int error)
 {
-	struct iscsi_cmd_task *ctask, *tmp;
+	struct iscsi_task *task, *tmp;
 
-	if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
-		conn->ctask = NULL;
+	if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+		conn->task = NULL;
 
 	/* flush pending */
-	list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
-		if (lun == ctask->sc->device->lun || lun == -1) {
+	list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+		if (lun == task->sc->device->lun || lun == -1) {
 			debug_scsi("failing pending sc %p itt 0x%x\n",
-				   ctask->sc, ctask->itt);
-			fail_command(conn, ctask, error << 16);
+				   task->sc, task->itt);
+			fail_command(conn, task, error << 16);
 		}
 	}
 
-	list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
-		if (lun == ctask->sc->device->lun || lun == -1) {
+	list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+		if (lun == task->sc->device->lun || lun == -1) {
 			debug_scsi("failing requeued sc %p itt 0x%x\n",
-				   ctask->sc, ctask->itt);
-			fail_command(conn, ctask, error << 16);
+				   task->sc, task->itt);
+			fail_command(conn, task, error << 16);
 		}
 	}
 
 	/* fail all other running */
-	list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
-		if (lun == ctask->sc->device->lun || lun == -1) {
+	list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+		if (lun == task->sc->device->lun || lun == -1) {
 			debug_scsi("failing in progress sc %p itt 0x%x\n",
-				   ctask->sc, ctask->itt);
-			fail_command(conn, ctask, DID_BUS_BUSY << 16);
+				   task->sc, task->itt);
+			fail_command(conn, task, DID_BUS_BUSY << 16);
 		}
 	}
 }
 
-static void iscsi_suspend_tx(struct iscsi_conn *conn)
+void iscsi_suspend_tx(struct iscsi_conn *conn)
 {
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-	scsi_flush_work(conn->session->host);
+	if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+		scsi_flush_work(conn->session->host);
 }
+EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
 
 static void iscsi_start_tx(struct iscsi_conn *conn)
 {
 	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-	scsi_queue_work(conn->session->host, &conn->xmitwork);
+	if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+		scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
 
 static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1405,7 +1484,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 	enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
 
 	cls_session = starget_to_session(scsi_target(scmd->device));
-	session = class_to_transport_session(cls_session);
+	session = cls_session->dd_data;
 
 	debug_scsi("scsi cmd %p timedout\n", scmd);
 
@@ -1443,7 +1522,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 			    jiffies))
 		rc = EH_RESET_TIMER;
 	/* if in the middle of checking the transport then give us more time */
-	if (conn->ping_mtask)
+	if (conn->ping_task)
 		rc = EH_RESET_TIMER;
 done:
 	spin_unlock(&session->lock);
@@ -1467,7 +1546,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
 
 	recv_timeout *= HZ;
 	last_recv = conn->last_recv;
-	if (conn->ping_mtask &&
+	if (conn->ping_task &&
 	    time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
 			   jiffies)) {
 		iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
@@ -1493,27 +1572,30 @@ done:
 	spin_unlock(&session->lock);
 }
 
-static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
 				      struct iscsi_tm *hdr)
 {
 	memset(hdr, 0, sizeof(*hdr));
 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
 	hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
-	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
-	hdr->rtt = ctask->hdr->itt;
-	hdr->refcmdsn = ctask->hdr->cmdsn;
+	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+	hdr->rtt = task->hdr->itt;
+	hdr->refcmdsn = task->hdr->cmdsn;
 }
 
 int iscsi_eh_abort(struct scsi_cmnd *sc)
 {
-	struct Scsi_Host *host = sc->device->host;
-	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *session;
 	struct iscsi_conn *conn;
-	struct iscsi_cmd_task *ctask;
+	struct iscsi_task *task;
 	struct iscsi_tm *hdr;
 	int rc, age;
 
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+
 	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->lock);
 	/*
@@ -1542,17 +1624,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	conn->eh_abort_cnt++;
 	age = session->age;
 
-	ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
-	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+	task = (struct iscsi_task *)sc->SCp.ptr;
+	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
 
-	/* ctask completed before time out */
-	if (!ctask->sc) {
+	/* task completed before time out */
+	if (!task->sc) {
 		debug_scsi("sc completed while abort in progress\n");
 		goto success;
 	}
 
-	if (ctask->state == ISCSI_TASK_PENDING) {
-		fail_command(conn, ctask, DID_ABORT << 16);
+	if (task->state == ISCSI_TASK_PENDING) {
+		fail_command(conn, task, DID_ABORT << 16);
 		goto success;
 	}
 
@@ -1562,7 +1644,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	conn->tmf_state = TMF_QUEUED;
 
 	hdr = &conn->tmhdr;
-	iscsi_prep_abort_task_pdu(ctask, hdr);
+	iscsi_prep_abort_task_pdu(task, hdr);
 
 	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
 		rc = FAILED;
@@ -1572,16 +1654,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	switch (conn->tmf_state) {
 	case TMF_SUCCESS:
 		spin_unlock_bh(&session->lock);
+		/*
+		 * stop tx side in case the target had sent an abort rsp but
+		 * the initiator was still writing out data.
+		 */
 		iscsi_suspend_tx(conn);
 		/*
-		 * clean up task if aborted. grab the recv lock as a writer
+		 * we do not stop the recv side because targets have been
+		 * good and have never sent us a successful tmf response
+		 * and then sent more data for the cmd.
 		 */
-		write_lock_bh(conn->recv_lock);
 		spin_lock(&session->lock);
-		fail_command(conn, ctask, DID_ABORT << 16);
+		fail_command(conn, task, DID_ABORT << 16);
 		conn->tmf_state = TMF_INITIAL;
 		spin_unlock(&session->lock);
-		write_unlock_bh(conn->recv_lock);
 		iscsi_start_tx(conn);
 		goto success_unlocked;
 	case TMF_TIMEDOUT:
@@ -1591,7 +1677,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	case TMF_NOT_FOUND:
 		if (!sc->SCp.ptr) {
 			conn->tmf_state = TMF_INITIAL;
-			/* ctask completed before tmf abort response */
+			/* task completed before tmf abort response */
 			debug_scsi("sc completed while abort in progress\n");
 			goto success;
 		}
@@ -1604,7 +1690,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 success:
 	spin_unlock_bh(&session->lock);
 success_unlocked:
-	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
 	mutex_unlock(&session->eh_mutex);
 	return SUCCESS;
 
@@ -1612,7 +1698,7 @@ failed:
 	spin_unlock_bh(&session->lock);
failed_unlocked:
 	debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
-		   ctask ? ctask->itt : 0);
+		   task ? task->itt : 0);
 	mutex_unlock(&session->eh_mutex);
 	return FAILED;
 }
@@ -1630,12 +1716,15 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
 
 int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 {
-	struct Scsi_Host *host = sc->device->host;
-	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *session;
 	struct iscsi_conn *conn;
 	struct iscsi_tm *hdr;
 	int rc = FAILED;
 
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+
 	debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
 
 	mutex_lock(&session->eh_mutex);
@@ -1678,13 +1767,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 	spin_unlock_bh(&session->lock);
 
 	iscsi_suspend_tx(conn);
-	/* need to grab the recv lock then session lock */
-	write_lock_bh(conn->recv_lock);
+
 	spin_lock(&session->lock);
 	fail_all_commands(conn, sc->device->lun, DID_ERROR);
 	conn->tmf_state = TMF_INITIAL;
 	spin_unlock(&session->lock);
-	write_unlock_bh(conn->recv_lock);
 
 	iscsi_start_tx(conn);
 	goto done;
@@ -1760,177 +1847,203 @@ void iscsi_pool_free(struct iscsi_pool *q)
 }
 EXPORT_SYMBOL_GPL(iscsi_pool_free);
 
-/*
- * iSCSI Session's hostdata organization:
- *
- * *------------------* <== hostdata_session(host->hostdata)
- * | ptr to class sess|
- * |------------------| <== iscsi_hostdata(host->hostdata)
- * | iscsi_session    |
- * *------------------*
+/**
+ * iscsi_host_add - add host to system
+ * @shost: scsi host
+ * @pdev: parent device
+ *
+ * This should be called by partial offload and software iscsi drivers
+ * to add a host to the system.
+ */
+int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+{
+	if (!shost->can_queue)
+		shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+
+	return scsi_add_host(shost, pdev);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_add);
+
+/**
+ * iscsi_host_alloc - allocate a host and driver data
+ * @sht: scsi host template
+ * @dd_data_size: driver host data size
+ * @qdepth: default device queue depth
+ *
+ * This should be called by partial offload and software iscsi drivers.
+ * To access the driver specific memory use the iscsi_host_priv() macro.
+ */
+struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+				   int dd_data_size, uint16_t qdepth)
+{
+	struct Scsi_Host *shost;
+
+	shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+	if (!shost)
+		return NULL;
+	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+
+	if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+		if (qdepth != 0)
+			printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+			       "Queue depth must be between 1 and %d.\n",
+			       qdepth, ISCSI_MAX_CMD_PER_LUN);
+		qdepth = ISCSI_DEF_CMD_PER_LUN;
+	}
+	shost->cmd_per_lun = qdepth;
+	return shost;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+
+/**
+ * iscsi_host_remove - remove host and sessions
+ * @shost: scsi host
+ *
+ * This will also remove any sessions attached to the host, but if userspace
+ * is managing the session at the same time this will break. TODO: add
+ * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+ * does not remove the memory from under us.
  */
+void iscsi_host_remove(struct Scsi_Host *shost)
+{
+	iscsi_host_for_each_session(shost, iscsi_session_teardown);
+	scsi_remove_host(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_remove);
 
-#define hostdata_privsize(_sz)	(sizeof(unsigned long) + _sz + \
-				 _sz % sizeof(unsigned long))
+void iscsi_host_free(struct Scsi_Host *shost)
+{
+	struct iscsi_host *ihost = shost_priv(shost);
 
-#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+	kfree(ihost->netdev);
+	kfree(ihost->hwaddress);
+	kfree(ihost->initiatorname);
+	scsi_host_put(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_free);
 
 /**
  * iscsi_session_setup - create iscsi cls session and host and session
- * @scsit: scsi transport template
  * @iscsit: iscsi transport template
- * @cmds_max: scsi host can queue
- * @qdepth: scsi host cmds per lun
- * @cmd_task_size: LLD ctask private data size
- * @mgmt_task_size: LLD mtask private data size
+ * @shost: scsi host
+ * @cmds_max: session can queue
+ * @cmd_task_size: LLD task private data size
  * @initial_cmdsn: initial CmdSN
- * @hostno: host no allocated
  *
  * This can be used by software iscsi_transports that allocate
  * a session per scsi host.
- **/
+ *
+ * Callers should set cmds_max to the largest total number (mgmt + scsi) of
+ * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+ * for nop handling and login/logout requests.
+ */
 struct iscsi_cls_session *
-iscsi_session_setup(struct iscsi_transport *iscsit,
-		    struct scsi_transport_template *scsit,
-		    uint16_t cmds_max, uint16_t qdepth,
-		    int cmd_task_size, int mgmt_task_size,
-		    uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
		    uint16_t cmds_max, int cmd_task_size,
+		    uint32_t initial_cmdsn, unsigned int id)
 {
-	struct Scsi_Host *shost;
 	struct iscsi_session *session;
 	struct iscsi_cls_session *cls_session;
-	int cmd_i;
+	int cmd_i, scsi_cmds, total_cmds = cmds_max;
 
-	if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
-		if (qdepth != 0)
-			printk(KERN_ERR "iscsi: invalid queue depth of %d. "
-			       "Queue depth must be between 1 and %d.\n",
-			       qdepth, ISCSI_MAX_CMD_PER_LUN);
-		qdepth = ISCSI_DEF_CMD_PER_LUN;
+	if (!total_cmds)
+		total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+	/*
+	 * The iscsi layer needs some tasks for nop handling and tmfs,
+	 * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+	 * + 1 command for scsi IO.
+	 */
+	if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+		       "must be a power of two that is at least %d.\n",
+		       total_cmds, ISCSI_TOTAL_CMDS_MIN);
+		return NULL;
 	}
 
-	if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
-	    cmds_max < 2) {
-		if (cmds_max != 0)
-			printk(KERN_ERR "iscsi: invalid can_queue of %d. "
-			       "can_queue must be a power of 2 and between "
-			       "2 and %d - setting to %d.\n", cmds_max,
-			       ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
-		cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+	if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+		       "must be a power of 2 less than or equal to %d.\n",
+		       cmds_max, ISCSI_TOTAL_CMDS_MAX);
+		total_cmds = ISCSI_TOTAL_CMDS_MAX;
 	}
 
-	shost = scsi_host_alloc(iscsit->host_template,
-				hostdata_privsize(sizeof(*session)));
-	if (!shost)
-		return NULL;
-
-	/* the iscsi layer takes one task for reserve */
-	shost->can_queue = cmds_max - 1;
-	shost->cmd_per_lun = qdepth;
-	shost->max_id = 1;
-	shost->max_channel = 0;
-	shost->max_lun = iscsit->max_lun;
-	shost->max_cmd_len = iscsit->max_cmd_len;
-	shost->transportt = scsit;
-	shost->transportt->create_work_queue = 1;
-	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
-	*hostno = shost->host_no;
+	if (!is_power_of_2(total_cmds)) {
+		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+		       "must be a power of 2.\n", total_cmds);
+		total_cmds = rounddown_pow_of_two(total_cmds);
+		if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+			return NULL;
+		printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+		       total_cmds);
+	}
+	scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
 
-	session = iscsi_hostdata(shost->hostdata);
-	memset(session, 0, sizeof(struct iscsi_session));
+	cls_session = iscsi_alloc_session(shost, iscsit,
+					  sizeof(struct iscsi_session));
+	if (!cls_session)
+		return NULL;
+	session = cls_session->dd_data;
+	session->cls_session = cls_session;
 	session->host = shost;
 	session->state = ISCSI_STATE_FREE;
 	session->fast_abort = 1;
 	session->lu_reset_timeout = 15;
 	session->abort_timeout = 10;
-	session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
-	session->cmds_max = cmds_max;
+	session->scsi_cmds_max = scsi_cmds;
+	session->cmds_max = total_cmds;
 	session->queued_cmdsn = session->cmdsn = initial_cmdsn;
 	session->exp_cmdsn = initial_cmdsn + 1;
 	session->max_cmdsn = initial_cmdsn + 1;
 	session->max_r2t = 1;
 	session->tt = iscsit;
 	mutex_init(&session->eh_mutex);
+	spin_lock_init(&session->lock);
 
 	/* initialize SCSI PDU commands pool */
 	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
 			    (void***)&session->cmds,
-			    cmd_task_size + sizeof(struct iscsi_cmd_task)))
+			    cmd_task_size + sizeof(struct iscsi_task)))
 		goto cmdpool_alloc_fail;
 
 	/* pre-format cmds pool with ITT */
 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+		struct iscsi_task *task = session->cmds[cmd_i];
 
 		if (cmd_task_size)
-			ctask->dd_data = &ctask[1];
-		ctask->itt = cmd_i;
-		INIT_LIST_HEAD(&ctask->running);
-	}
-
-	spin_lock_init(&session->lock);
-
-	/* initialize immediate command pool */
-	if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
-			    (void***)&session->mgmt_cmds,
-			    mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
-		goto mgmtpool_alloc_fail;
-
-
-	/* pre-format immediate cmds pool with ITT */
-	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
-		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
-
-		if (mgmt_task_size)
-			mtask->dd_data = &mtask[1];
-		mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
-		INIT_LIST_HEAD(&mtask->running);
+			task->dd_data = &task[1];
+		task->itt = cmd_i;
+		INIT_LIST_HEAD(&task->running);
 	}
 
-	if (scsi_add_host(shost, NULL))
-		goto add_host_fail;
-
 	if (!try_module_get(iscsit->owner))
-		goto cls_session_fail;
-
-	cls_session = iscsi_create_session(shost, iscsit, 0);
-	if (!cls_session)
-		goto module_put;
-	*(unsigned long*)shost->hostdata = (unsigned long)cls_session;
+		goto module_get_fail;
 
+	if (iscsi_add_session(cls_session, id))
+		goto cls_session_fail;
 	return cls_session;
 
-module_put:
-	module_put(iscsit->owner);
 cls_session_fail:
-	scsi_remove_host(shost);
-add_host_fail:
-	iscsi_pool_free(&session->mgmtpool);
-mgmtpool_alloc_fail:
+	module_put(iscsit->owner);
module_get_fail:
 	iscsi_pool_free(&session->cmdpool);
cmdpool_alloc_fail:
-	scsi_host_put(shost);
+	iscsi_free_session(cls_session);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(iscsi_session_setup);
 
 /**
  * iscsi_session_teardown - destroy session, host, and cls_session
- * shost: scsi host
+ * @cls_session: iscsi session
  *
- * This can be used by software iscsi_transports that allocate
- * a session per scsi host.
- **/
+ * The driver must have called iscsi_remove_session before
+ * calling this.
+ */
 void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 {
-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+	struct iscsi_session *session = cls_session->dd_data;
 	struct module *owner = cls_session->transport->owner;
 
-	iscsi_remove_session(cls_session);
-	scsi_remove_host(shost);
-
-	iscsi_pool_free(&session->mgmtpool);
 	iscsi_pool_free(&session->cmdpool);
 
 	kfree(session->password);
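
The can_queue policy introduced above — reject values below the minimum, clamp oversized ones, round non-powers-of-two down — can be checked in isolation. A self-contained C sketch with stand-in limits (the real ISCSI_TOTAL_CMDS_MIN/MAX values differ):

/* Standalone model of the session_setup can_queue normalization. */
#include <stdint.h>
#include <stdio.h>

#define TOTAL_CMDS_MIN 16	/* illustrative stand-in */
#define TOTAL_CMDS_MAX 4096	/* illustrative stand-in */

static uint32_t rounddown_pow_of_two(uint32_t n)
{
	while (n & (n - 1))
		n &= n - 1;	/* clear lowest set bit until one remains */
	return n;
}

static int normalize_can_queue(uint32_t requested, uint32_t *out)
{
	if (requested < TOTAL_CMDS_MIN)
		return -1;			/* hard reject */
	if (requested > TOTAL_CMDS_MAX)
		requested = TOTAL_CMDS_MAX;	/* clamp */
	if (requested & (requested - 1))
		requested = rounddown_pow_of_two(requested);
	if (requested < TOTAL_CMDS_MIN)
		return -1;
	*out = requested;
	return 0;
}

int main(void)
{
	uint32_t q;
	if (!normalize_can_queue(100, &q))
		printf("100 -> %u\n", q);	/* prints 100 -> 64 */
	return 0;
}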
@@ -1938,12 +2051,10 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 	kfree(session->username);
 	kfree(session->username_in);
 	kfree(session->targetname);
-	kfree(session->netdev);
-	kfree(session->hwaddress);
 	kfree(session->initiatorname);
+	kfree(session->ifacename);
 
-	iscsi_free_session(cls_session);
-	scsi_host_put(shost);
+	iscsi_destroy_session(cls_session);
 	module_put(owner);
 }
 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -1951,22 +2062,26 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
 /**
  * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
  * @cls_session: iscsi_cls_session
+ * @dd_size: private driver data size
  * @conn_idx: cid
- **/
+ */
 struct iscsi_cls_conn *
-iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+		 uint32_t conn_idx)
 {
-	struct iscsi_session *session = class_to_transport_session(cls_session);
+	struct iscsi_session *session = cls_session->dd_data;
 	struct iscsi_conn *conn;
 	struct iscsi_cls_conn *cls_conn;
 	char *data;
 
-	cls_conn = iscsi_create_conn(cls_session, conn_idx);
+	cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+				     conn_idx);
 	if (!cls_conn)
 		return NULL;
 	conn = cls_conn->dd_data;
-	memset(conn, 0, sizeof(*conn));
+	memset(conn, 0, sizeof(*conn) + dd_size);
 
+	conn->dd_data = cls_conn->dd_data + sizeof(*conn);
 	conn->session = session;
 	conn->cls_conn = cls_conn;
 	conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
@@ -1985,30 +2100,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	INIT_LIST_HEAD(&conn->requeue);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
-	/* allocate login_mtask used for the login/text sequences */
+	/* allocate login_task used for the login/text sequences */
 	spin_lock_bh(&session->lock);
-	if (!__kfifo_get(session->mgmtpool.queue,
-			 (void*)&conn->login_mtask,
+	if (!__kfifo_get(session->cmdpool.queue,
+			 (void*)&conn->login_task,
 			 sizeof(void*))) {
 		spin_unlock_bh(&session->lock);
-		goto login_mtask_alloc_fail;
+		goto login_task_alloc_fail;
 	}
 	spin_unlock_bh(&session->lock);
 
 	data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
 	if (!data)
-		goto login_mtask_data_alloc_fail;
-	conn->login_mtask->data = conn->data = data;
+		goto login_task_data_alloc_fail;
+	conn->login_task->data = conn->data = data;
 
 	init_timer(&conn->tmf_timer);
 	init_waitqueue_head(&conn->ehwait);
 
 	return cls_conn;
 
-login_mtask_data_alloc_fail:
-	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+login_task_data_alloc_fail:
+	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
 		    sizeof(void*));
-login_mtask_alloc_fail:
+login_task_alloc_fail:
 	iscsi_destroy_conn(cls_conn);
 	return NULL;
 }
@@ -2068,7 +2183,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 	spin_lock_bh(&session->lock);
 	kfree(conn->data);
 	kfree(conn->persistent_address);
-	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
 		    sizeof(void*));
 	if (session->leadconn == conn)
 		session->leadconn = NULL;
@@ -2140,7 +2255,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
 	}
 	spin_unlock_bh(&session->lock);
 
-	iscsi_unblock_session(session_to_cls(session));
+	iscsi_unblock_session(session->cls_session);
 	wake_up(&conn->ehwait);
 	return 0;
 }
@@ -2149,21 +2264,23 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
 static void
 flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
 {
-	struct iscsi_mgmt_task *mtask, *tmp;
+	struct iscsi_task *task, *tmp;
 
 	/* handle pending */
-	list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
-		debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
-		iscsi_free_mgmt_task(conn, mtask);
+	list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+		debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+		/* release ref from prep task */
+		__iscsi_put_task(task);
 	}
 
 	/* handle running */
-	list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
-		debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
-		iscsi_free_mgmt_task(conn, mtask);
+	list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+		debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+		/* release ref from prep task */
+		__iscsi_put_task(task);
 	}
 
-	conn->mtask = NULL;
+	conn->task = NULL;
 }
 
 static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2182,17 +2299,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 	}
 
 	/*
-	 * The LLD either freed/unset the lock on us, or userspace called
-	 * stop but did not create a proper connection (connection was never
-	 * bound or it was unbound then stop was called).
-	 */
-	if (!conn->recv_lock) {
-		spin_unlock_bh(&session->lock);
-		mutex_unlock(&session->eh_mutex);
-		return;
-	}
-
-	/*
 	 * When this is called for the in_login state, we only want to clean
 	 * up the login task and connection. We do not need to block and set
 	 * the recovery state again
@@ -2208,11 +2314,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 	spin_unlock_bh(&session->lock);
 
 	iscsi_suspend_tx(conn);
-
-	write_lock_bh(conn->recv_lock);
-	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
-	write_unlock_bh(conn->recv_lock);
-
 	/*
 	 * for connection level recovery we should not calculate
 	 * header digest. conn->hdr_size used for optimization
@@ -2225,7 +2326,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 	if (session->state == ISCSI_STATE_IN_RECOVERY &&
 	    old_stop_stage != STOP_CONN_RECOVER) {
 		debug_scsi("blocking session\n");
-		iscsi_block_session(session_to_cls(session));
+		iscsi_block_session(session->cls_session);
 	}
 }
 
@@ -2260,7 +2361,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
 int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
 		    struct iscsi_cls_conn *cls_conn, int is_leading)
 {
-	struct iscsi_session *session = class_to_transport_session(cls_session);
+	struct iscsi_session *session = cls_session->dd_data;
 	struct iscsi_conn *conn = cls_conn->dd_data;
 
 	spin_lock_bh(&session->lock);
@@ -2399,6 +2500,14 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		if (!conn->persistent_address)
 			return -ENOMEM;
 		break;
+	case ISCSI_PARAM_IFACE_NAME:
+		if (!session->ifacename)
+			session->ifacename = kstrdup(buf, GFP_KERNEL);
+		break;
+	case ISCSI_PARAM_INITIATOR_NAME:
+		if (!session->initiatorname)
+			session->initiatorname = kstrdup(buf, GFP_KERNEL);
+		break;
 	default:
 		return -ENOSYS;
 	}
@@ -2410,8 +2519,7 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
 int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 			    enum iscsi_param param, char *buf)
 {
-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+	struct iscsi_session *session = cls_session->dd_data;
 	int len;
 
 	switch(param) {
@@ -2466,6 +2574,15 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 	case ISCSI_PARAM_PASSWORD_IN:
 		len = sprintf(buf, "%s\n", session->password_in);
 		break;
+	case ISCSI_PARAM_IFACE_NAME:
+		len = sprintf(buf, "%s\n", session->ifacename);
+		break;
+	case ISCSI_PARAM_INITIATOR_NAME:
+		if (!session->initiatorname)
+			len = sprintf(buf, "%s\n", "unknown");
+		else
+			len = sprintf(buf, "%s\n", session->initiatorname);
+		break;
 	default:
 		return -ENOSYS;
 	}
@@ -2525,29 +2642,35 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
 int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
 			 char *buf)
 {
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+	struct iscsi_host *ihost = shost_priv(shost);
 	int len;
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_NETDEV_NAME:
-		if (!session->netdev)
+		if (!ihost->netdev)
 			len = sprintf(buf, "%s\n", "default");
 		else
-			len = sprintf(buf, "%s\n", session->netdev);
+			len = sprintf(buf, "%s\n", ihost->netdev);
 		break;
 	case ISCSI_HOST_PARAM_HWADDRESS:
-		if (!session->hwaddress)
+		if (!ihost->hwaddress)
 			len = sprintf(buf, "%s\n", "default");
 		else
-			len = sprintf(buf, "%s\n", session->hwaddress);
+			len = sprintf(buf, "%s\n", ihost->hwaddress);
 		break;
 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
-		if (!session->initiatorname)
+		if (!ihost->initiatorname)
 			len = sprintf(buf, "%s\n", "unknown");
 		else
-			len = sprintf(buf, "%s\n", session->initiatorname);
+			len = sprintf(buf, "%s\n", ihost->initiatorname);
+		break;
+	case ISCSI_HOST_PARAM_IPADDRESS:
+		if (!strlen(ihost->local_address))
+			len = sprintf(buf, "%s\n", "unknown");
+		else
+			len = sprintf(buf, "%s\n",
+				      ihost->local_address);
 		break;
-
 	default:
 		return -ENOSYS;
 	}
@@ -2559,20 +2682,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
 int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
 			 char *buf, int buflen)
 {
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+	struct iscsi_host *ihost = shost_priv(shost);
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_NETDEV_NAME:
-		if (!session->netdev)
-			session->netdev = kstrdup(buf, GFP_KERNEL);
+		if (!ihost->netdev)
+			ihost->netdev = kstrdup(buf, GFP_KERNEL);
 		break;
 	case ISCSI_HOST_PARAM_HWADDRESS:
-		if (!session->hwaddress)
-			session->hwaddress = kstrdup(buf, GFP_KERNEL);
+		if (!ihost->hwaddress)
+			ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
 		break;
 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
-		if (!session->initiatorname)
-			session->initiatorname = kstrdup(buf, GFP_KERNEL);
+		if (!ihost->initiatorname)
+			ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
 		break;
 	default:
 		return -ENOSYS;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index ec0b0f6e5e1a..e0e018d12653 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -33,6 +33,7 @@ struct lpfc_sli2_slim;
 #define LPFC_MAX_SG_SEG_CNT	256	/* sg element count per scsi cmnd */
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL	120	/* lun q_depth ramp up interval */
+#define LPFC_VNAME_LEN		100	/* vport symbolic name length */
 
 /*
  * Following time intervals are used for adjusting SCSI device
@@ -59,6 +60,9 @@ struct lpfc_sli2_slim;
 
 #define MAX_HBAEVT	32
 
+/* lpfc wait event data ready flag */
+#define LPFC_DATA_READY		(1<<0)
+
 enum lpfc_polling_flags {
 	ENABLE_FCP_RING_POLLING = 0x1,
 	DISABLE_FCP_RING_INT = 0x2
@@ -425,9 +429,6 @@ struct lpfc_hba {
 
 	uint16_t pci_cfg_value;
 
-	uint8_t work_found;
-#define LPFC_MAX_WORKER_ITERATION  4
-
 	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
 
 	uint32_t fc_eventTag;	/* event tag for link attention */
@@ -489,8 +490,9 @@ struct lpfc_hba {
 	uint32_t work_hs;		/* HS stored in case of ERRAT */
 	uint32_t work_status[2];	/* Extra status from SLIM */
 
-	wait_queue_head_t *work_wait;
+	wait_queue_head_t work_waitq;
 	struct task_struct *worker_thread;
+	long data_flags;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
 	struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
@@ -637,6 +639,17 @@ lpfc_is_link_up(struct lpfc_hba *phba)
 		phba->link_state == LPFC_HBA_READY;
 }
 
+static inline void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+	/* Set the lpfc data pending flag */
+	set_bit(LPFC_DATA_READY, &phba->data_flags);
+
+	/* Wake up worker thread */
+	wake_up(&phba->work_waitq);
+	return;
+}
+
 #define FC_REG_DUMP_EVENT	0x10	/* Register for Dump events */
 #define FC_REG_TEMPERATURE_EVENT	0x20	/* Register for temperature
 						   event */
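
lpfc_worker_wake_up() sets the LPFC_DATA_READY bit before waking, so work posted between the worker's check and its sleep is never lost. The same handshake can be modeled in userspace with a condition variable (all names are illustrative; build with -lpthread):

/* Flag-then-wake handshake model for the lpfc worker thread. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static unsigned long data_flags;	/* bit 0 plays LPFC_DATA_READY */

static void worker_wake_up(void)
{
	pthread_mutex_lock(&lock);
	data_flags |= 1UL;		/* set the data pending flag */
	pthread_cond_signal(&waitq);	/* wake up worker thread */
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!(data_flags & 1UL))	/* equivalent of the wait_event check */
		pthread_cond_wait(&waitq, &lock);
	data_flags &= ~1UL;		/* consume the flag, then do work */
	pthread_mutex_unlock(&lock);
	printf("worker: processing posted work\n");
	return arg;
}

int main(void)
{
	pthread_t tid;
	pthread_create(&tid, NULL, worker, NULL);
	worker_wake_up();
	pthread_join(tid, NULL);
	return 0;
}

Because the flag is tested under the lock, a wakeup issued before the worker sleeps simply leaves the bit set and the worker skips the wait entirely.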
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 960baaf11fb1..37bfa0bd1dae 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1995,8 +1995,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1995 /* Don't allow mailbox commands to be sent when blocked 1995 /* Don't allow mailbox commands to be sent when blocked
1996 * or when in the middle of discovery 1996 * or when in the middle of discovery
1997 */ 1997 */
1998 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO || 1998 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
1999 vport->fc_flag & FC_NDISC_ACTIVE) {
2000 sysfs_mbox_idle(phba); 1999 sysfs_mbox_idle(phba);
2001 spin_unlock_irq(&phba->hbalock); 2000 spin_unlock_irq(&phba->hbalock);
2002 return -EAGAIN; 2001 return -EAGAIN;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 7c9f8317d972..1b8245213b83 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -142,7 +142,7 @@ int lpfc_config_port_post(struct lpfc_hba *);
142int lpfc_hba_down_prep(struct lpfc_hba *); 142int lpfc_hba_down_prep(struct lpfc_hba *);
143int lpfc_hba_down_post(struct lpfc_hba *); 143int lpfc_hba_down_post(struct lpfc_hba *);
144void lpfc_hba_init(struct lpfc_hba *, uint32_t *); 144void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
145int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int); 145int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
146void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int); 146void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
147int lpfc_online(struct lpfc_hba *); 147int lpfc_online(struct lpfc_hba *);
148void lpfc_unblock_mgmt_io(struct lpfc_hba *); 148void lpfc_unblock_mgmt_io(struct lpfc_hba *);
@@ -263,6 +263,7 @@ extern int lpfc_sli_mode;
263extern int lpfc_enable_npiv; 263extern int lpfc_enable_npiv;
264 264
265int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); 265int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
266int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
266void lpfc_terminate_rport_io(struct fc_rport *); 267void lpfc_terminate_rport_io(struct fc_rport *);
267void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); 268void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
268 269
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 153afae567b5..7fc74cf5823b 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -101,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
101 /* Not enough posted buffers; Try posting more buffers */ 101 /* Not enough posted buffers; Try posting more buffers */
102 phba->fc_stat.NoRcvBuf++; 102 phba->fc_stat.NoRcvBuf++;
103 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 103 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
104 lpfc_post_buffer(phba, pring, 2, 1); 104 lpfc_post_buffer(phba, pring, 2);
105 return; 105 return;
106 } 106 }
107 107
@@ -151,7 +151,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
151 } 151 }
152 list_del(&iocbq->list); 152 list_del(&iocbq->list);
153 lpfc_sli_release_iocbq(phba, iocbq); 153 lpfc_sli_release_iocbq(phba, iocbq);
154 lpfc_post_buffer(phba, pring, i, 1); 154 lpfc_post_buffer(phba, pring, i);
155 } 155 }
156 } 156 }
157} 157}
@@ -990,7 +990,7 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
990 return; 990 return;
991} 991}
992 992
993static int 993int
994lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, 994lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
995 size_t size) 995 size_t size)
996{ 996{
@@ -1679,20 +1679,18 @@ lpfc_fdmi_tmo(unsigned long ptr)
1679{ 1679{
1680 struct lpfc_vport *vport = (struct lpfc_vport *)ptr; 1680 struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
1681 struct lpfc_hba *phba = vport->phba; 1681 struct lpfc_hba *phba = vport->phba;
1682 uint32_t tmo_posted;
1682 unsigned long iflag; 1683 unsigned long iflag;
1683 1684
1684 spin_lock_irqsave(&vport->work_port_lock, iflag); 1685 spin_lock_irqsave(&vport->work_port_lock, iflag);
1685 if (!(vport->work_port_events & WORKER_FDMI_TMO)) { 1686 tmo_posted = vport->work_port_events & WORKER_FDMI_TMO;
1687 if (!tmo_posted)
1686 vport->work_port_events |= WORKER_FDMI_TMO; 1688 vport->work_port_events |= WORKER_FDMI_TMO;
1687 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 1689 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
1688 1690
1689 spin_lock_irqsave(&phba->hbalock, iflag); 1691 if (!tmo_posted)
1690 if (phba->work_wait) 1692 lpfc_worker_wake_up(phba);
1691 lpfc_worker_wake_up(phba); 1693 return;
1692 spin_unlock_irqrestore(&phba->hbalock, iflag);
1693 }
1694 else
1695 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
1696} 1694}
1697 1695
1698void 1696void
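
The reworked lpfc_fdmi_tmo() above is the template every timer handler in this series converges on: test-and-set the posted bit under work_port_lock, drop the lock, and only then wake the worker, so the wake-up never happens under a spinlock and a timer that fires twice posts once. A sketch of the shape, using the placeholder demo_* types from the earlier sketch:

#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_WORKER_TMO 0x8000          /* one bit per deferred event */

struct demo_vport {
        spinlock_t work_port_lock;
        u32 work_port_events;
        struct demo_hba *hba;           /* from the sketch above */
};

static void demo_tmo(struct demo_vport *vp)
{
        unsigned long iflag;
        u32 tmo_posted;

        /* Test-and-set the posted bit under the lock... */
        spin_lock_irqsave(&vp->work_port_lock, iflag);
        tmo_posted = vp->work_port_events & DEMO_WORKER_TMO;
        if (!tmo_posted)
                vp->work_port_events |= DEMO_WORKER_TMO;
        spin_unlock_irqrestore(&vp->work_port_lock, iflag);

        /* ...but wake the worker only after the lock is dropped, and
         * only if this invocation was the one that posted the event. */
        if (!tmo_posted)
                demo_worker_wake_up(vp->hba);
}
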
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 886c5f1b11d2..f54e0f7eaee3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1754,29 +1754,34 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1754 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1754 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1755 struct lpfc_work_evt *evtp; 1755 struct lpfc_work_evt *evtp;
1756 1756
1757 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
1758 return;
1757 spin_lock_irq(shost->host_lock); 1759 spin_lock_irq(shost->host_lock);
1758 nlp->nlp_flag &= ~NLP_DELAY_TMO; 1760 nlp->nlp_flag &= ~NLP_DELAY_TMO;
1759 spin_unlock_irq(shost->host_lock); 1761 spin_unlock_irq(shost->host_lock);
1760 del_timer_sync(&nlp->nlp_delayfunc); 1762 del_timer_sync(&nlp->nlp_delayfunc);
1761 nlp->nlp_last_elscmd = 0; 1763 nlp->nlp_last_elscmd = 0;
1762
1763 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 1764 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
1764 list_del_init(&nlp->els_retry_evt.evt_listp); 1765 list_del_init(&nlp->els_retry_evt.evt_listp);
1765 /* Decrement nlp reference count held for the delayed retry */ 1766 /* Decrement nlp reference count held for the delayed retry */
1766 evtp = &nlp->els_retry_evt; 1767 evtp = &nlp->els_retry_evt;
1767 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 1768 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
1768 } 1769 }
1769
1770 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 1770 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
1771 spin_lock_irq(shost->host_lock); 1771 spin_lock_irq(shost->host_lock);
1772 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1772 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1773 spin_unlock_irq(shost->host_lock); 1773 spin_unlock_irq(shost->host_lock);
1774 if (vport->num_disc_nodes) { 1774 if (vport->num_disc_nodes) {
1775 /* Check to see if there are more 1775 if (vport->port_state < LPFC_VPORT_READY) {
1776 * PLOGIs to be sent 1776 /* Check if there are more ADISCs to be sent */
1777 */ 1777 lpfc_more_adisc(vport);
1778 lpfc_more_plogi(vport); 1778 if ((vport->num_disc_nodes == 0) &&
1779 1779 (vport->fc_npr_cnt))
1780 lpfc_els_disc_plogi(vport);
1781 } else {
1782 /* Check if there are more PLOGIs to be sent */
1783 lpfc_more_plogi(vport);
1784 }
1780 if (vport->num_disc_nodes == 0) { 1785 if (vport->num_disc_nodes == 0) {
1781 spin_lock_irq(shost->host_lock); 1786 spin_lock_irq(shost->host_lock);
1782 vport->fc_flag &= ~FC_NDISC_ACTIVE; 1787 vport->fc_flag &= ~FC_NDISC_ACTIVE;
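
Moving the NLP_DELAY_TMO test to the top of lpfc_cancel_retry_delay_tmo() makes the routine idempotent, which is why later hunks in this patch delete the "if (ndlp->nlp_flag & NLP_DELAY_TMO)" guards at every call site. A hedged sketch of the idempotent-cancel shape, with placeholder names:

#include <linux/timer.h>
#include <linux/spinlock.h>

#define DEMO_DELAY_TMO 0x1

struct demo_node {
        spinlock_t *host_lock;          /* the shost lock in the driver */
        u32 nlp_flag;
        struct timer_list delay_timer;
};

static void demo_cancel_delay_tmo(struct demo_node *np)
{
        if (!(np->nlp_flag & DEMO_DELAY_TMO))
                return;                 /* nothing armed: cheap no-op */

        spin_lock_irq(np->host_lock);
        np->nlp_flag &= ~DEMO_DELAY_TMO;
        spin_unlock_irq(np->host_lock);
        /* del_timer_sync() also waits out a running handler */
        del_timer_sync(&np->delay_timer);
}
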
@@ -1798,10 +1803,6 @@ lpfc_els_retry_delay(unsigned long ptr)
1798 unsigned long flags; 1803 unsigned long flags;
1799 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 1804 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
1800 1805
1801 ndlp = (struct lpfc_nodelist *) ptr;
1802 phba = ndlp->vport->phba;
1803 evtp = &ndlp->els_retry_evt;
1804
1805 spin_lock_irqsave(&phba->hbalock, flags); 1806 spin_lock_irqsave(&phba->hbalock, flags);
1806 if (!list_empty(&evtp->evt_listp)) { 1807 if (!list_empty(&evtp->evt_listp)) {
1807 spin_unlock_irqrestore(&phba->hbalock, flags); 1808 spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1812,11 +1813,11 @@ lpfc_els_retry_delay(unsigned long ptr)
1812 * count until the queued work is done 1813 * count until the queued work is done
1813 */ 1814 */
1814 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 1815 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
1815 evtp->evt = LPFC_EVT_ELS_RETRY; 1816 if (evtp->evt_arg1) {
1816 list_add_tail(&evtp->evt_listp, &phba->work_list); 1817 evtp->evt = LPFC_EVT_ELS_RETRY;
1817 if (phba->work_wait) 1818 list_add_tail(&evtp->evt_listp, &phba->work_list);
1818 lpfc_worker_wake_up(phba); 1819 lpfc_worker_wake_up(phba);
1819 1820 }
1820 spin_unlock_irqrestore(&phba->hbalock, flags); 1821 spin_unlock_irqrestore(&phba->hbalock, flags);
1821 return; 1822 return;
1822} 1823}
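
The retry-delay handler now only queues the event when lpfc_nlp_get() actually returned a reference, so a node already on its way to being freed can no longer land on the work list with a dangling argument. A sketch of that guarded enqueue, assuming demo_hba also carries the hbalock and work_list fields (placeholder names throughout):

#include <linux/list.h>
#include <linux/spinlock.h>

#define DEMO_EVT_RETRY 1

struct demo_evt {
        void *evt_arg1;
        int evt;
        struct list_head evt_listp;
};

static struct demo_node *demo_node_get(struct demo_node *np);

static void demo_queue_retry(struct demo_hba *hba, struct demo_node *np,
                             struct demo_evt *evtp)
{
        unsigned long flags;

        spin_lock_irqsave(&hba->hbalock, flags);
        /* demo_node_get() stands for a kref-style getter that returns
         * NULL once the node is dying; queue only on success. */
        evtp->evt_arg1 = demo_node_get(np);
        if (evtp->evt_arg1) {
                evtp->evt = DEMO_EVT_RETRY;
                list_add_tail(&evtp->evt_listp, &hba->work_list);
                demo_worker_wake_up(hba);   /* safe under the lock */
        }
        spin_unlock_irqrestore(&hba->hbalock, flags);
}
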
@@ -2761,10 +2762,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2761 npr = (PRLI *) pcmd; 2762 npr = (PRLI *) pcmd;
2762 vpd = &phba->vpd; 2763 vpd = &phba->vpd;
2763 /* 2764 /*
2764 * If our firmware version is 3.20 or later, 2765 * If the remote port is a target and our firmware version is 3.20 or
2765 * set the following bits for FC-TAPE support. 2766 * later, set the following bits for FC-TAPE support.
2766 */ 2767 */
2767 if (vpd->rev.feaLevelHigh >= 0x02) { 2768 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
2769 (vpd->rev.feaLevelHigh >= 0x02)) {
2768 npr->ConfmComplAllowed = 1; 2770 npr->ConfmComplAllowed = 1;
2769 npr->Retry = 1; 2771 npr->Retry = 1;
2770 npr->TaskRetryIdReq = 1; 2772 npr->TaskRetryIdReq = 1;
@@ -3056,27 +3058,16 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
3056{ 3058{
3057 struct lpfc_nodelist *ndlp = NULL; 3059 struct lpfc_nodelist *ndlp = NULL;
3058 3060
3059 /* Look at all nodes affected by pending RSCNs and move 3061 /* Move all nodes affected by pending RSCNs to NPR state. */
3060 * them to NPR state.
3061 */
3062
3063 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 3062 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3064 if (!NLP_CHK_NODE_ACT(ndlp) || 3063 if (!NLP_CHK_NODE_ACT(ndlp) ||
3065 ndlp->nlp_state == NLP_STE_UNUSED_NODE || 3064 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
3066 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) 3065 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
3067 continue; 3066 continue;
3068
3069 lpfc_disc_state_machine(vport, ndlp, NULL, 3067 lpfc_disc_state_machine(vport, ndlp, NULL,
3070 NLP_EVT_DEVICE_RECOVERY); 3068 NLP_EVT_DEVICE_RECOVERY);
3071 3069 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3072 /*
3073 * Make sure NLP_DELAY_TMO is NOT running after a device
3074 * recovery event.
3075 */
3076 if (ndlp->nlp_flag & NLP_DELAY_TMO)
3077 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3078 } 3070 }
3079
3080 return 0; 3071 return 0;
3081} 3072}
3082 3073
@@ -3781,91 +3772,27 @@ static int
3781lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3772lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3782 struct lpfc_nodelist *fan_ndlp) 3773 struct lpfc_nodelist *fan_ndlp)
3783{ 3774{
3784 struct lpfc_dmabuf *pcmd; 3775 struct lpfc_hba *phba = vport->phba;
3785 uint32_t *lp; 3776 uint32_t *lp;
3786 IOCB_t *icmd;
3787 uint32_t cmd, did;
3788 FAN *fp; 3777 FAN *fp;
3789 struct lpfc_nodelist *ndlp, *next_ndlp;
3790 struct lpfc_hba *phba = vport->phba;
3791
3792 /* FAN received */
3793 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3794 "0265 FAN received\n");
3795 icmd = &cmdiocb->iocb;
3796 did = icmd->un.elsreq64.remoteID;
3797 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
3798 lp = (uint32_t *)pcmd->virt;
3799
3800 cmd = *lp++;
3801 fp = (FAN *) lp;
3802 3778
3779 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
3780 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
3781 fp = (FAN *) ++lp;
3803 /* FAN received; Fan does not have a reply sequence */ 3782 /* FAN received; Fan does not have a reply sequence */
3804 3783 if ((vport == phba->pport) &&
3805 if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) { 3784 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
3806 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 3785 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
3807 sizeof(struct lpfc_name)) != 0) || 3786 sizeof(struct lpfc_name))) ||
3808 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 3787 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
3809 sizeof(struct lpfc_name)) != 0)) { 3788 sizeof(struct lpfc_name)))) {
3810 /* 3789 /* This port has switched fabrics. FLOGI is required */
3811 * This node has switched fabrics. FLOGI is required
3812 * Clean up the old rpi's
3813 */
3814
3815 list_for_each_entry_safe(ndlp, next_ndlp,
3816 &vport->fc_nodes, nlp_listp) {
3817 if (!NLP_CHK_NODE_ACT(ndlp))
3818 continue;
3819 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3820 continue;
3821 if (ndlp->nlp_type & NLP_FABRIC) {
3822 /*
3823 * Clean up old Fabric, Nameserver and
3824 * other NLP_FABRIC logins
3825 */
3826 lpfc_drop_node(vport, ndlp);
3827
3828 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
3829 /* Fail outstanding I/O now since this
3830 * device is marked for PLOGI
3831 */
3832 lpfc_unreg_rpi(vport, ndlp);
3833 }
3834 }
3835
3836 lpfc_initial_flogi(vport); 3790 lpfc_initial_flogi(vport);
3837 return 0; 3791 } else {
3838 } 3792 /* FAN verified - skip FLOGI */
3839 /* Discovery not needed, 3793 vport->fc_myDID = vport->fc_prevDID;
3840 * move the nodes to their original state. 3794 lpfc_issue_fabric_reglogin(vport);
3841 */
3842 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
3843 nlp_listp) {
3844 if (!NLP_CHK_NODE_ACT(ndlp))
3845 continue;
3846 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3847 continue;
3848
3849 switch (ndlp->nlp_prev_state) {
3850 case NLP_STE_UNMAPPED_NODE:
3851 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3852 lpfc_nlp_set_state(vport, ndlp,
3853 NLP_STE_UNMAPPED_NODE);
3854 break;
3855
3856 case NLP_STE_MAPPED_NODE:
3857 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3858 lpfc_nlp_set_state(vport, ndlp,
3859 NLP_STE_MAPPED_NODE);
3860 break;
3861
3862 default:
3863 break;
3864 }
3865 } 3795 }
3866
3867 /* Start discovery - this should just do CLEAR_LA */
3868 lpfc_disc_start(vport);
3869 } 3796 }
3870 return 0; 3797 return 0;
3871} 3798}
@@ -3875,20 +3802,17 @@ lpfc_els_timeout(unsigned long ptr)
3875{ 3802{
3876 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 3803 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
3877 struct lpfc_hba *phba = vport->phba; 3804 struct lpfc_hba *phba = vport->phba;
3805 uint32_t tmo_posted;
3878 unsigned long iflag; 3806 unsigned long iflag;
3879 3807
3880 spin_lock_irqsave(&vport->work_port_lock, iflag); 3808 spin_lock_irqsave(&vport->work_port_lock, iflag);
3881 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) { 3809 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
3810 if (!tmo_posted)
3882 vport->work_port_events |= WORKER_ELS_TMO; 3811 vport->work_port_events |= WORKER_ELS_TMO;
3883 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 3812 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3884 3813
3885 spin_lock_irqsave(&phba->hbalock, iflag); 3814 if (!tmo_posted)
3886 if (phba->work_wait) 3815 lpfc_worker_wake_up(phba);
3887 lpfc_worker_wake_up(phba);
3888 spin_unlock_irqrestore(&phba->hbalock, iflag);
3889 }
3890 else
3891 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3892 return; 3816 return;
3893} 3817}
3894 3818
@@ -3933,9 +3857,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
3933 els_command == ELS_CMD_FDISC) 3857 els_command == ELS_CMD_FDISC)
3934 continue; 3858 continue;
3935 3859
3936 if (vport != piocb->vport)
3937 continue;
3938
3939 if (piocb->drvrTimeout > 0) { 3860 if (piocb->drvrTimeout > 0) {
3940 if (piocb->drvrTimeout >= timeout) 3861 if (piocb->drvrTimeout >= timeout)
3941 piocb->drvrTimeout -= timeout; 3862 piocb->drvrTimeout -= timeout;
@@ -4089,7 +4010,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4089 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 4010 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
4090 cmd = *payload; 4011 cmd = *payload;
4091 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 4012 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
4092 lpfc_post_buffer(phba, pring, 1, 1); 4013 lpfc_post_buffer(phba, pring, 1);
4093 4014
4094 did = icmd->un.rcvels.remoteID; 4015 did = icmd->un.rcvels.remoteID;
4095 if (icmd->ulpStatus) { 4016 if (icmd->ulpStatus) {
@@ -4398,7 +4319,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4398 phba->fc_stat.NoRcvBuf++; 4319 phba->fc_stat.NoRcvBuf++;
4399 /* Not enough posted buffers; Try posting more buffers */ 4320 /* Not enough posted buffers; Try posting more buffers */
4400 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 4321 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
4401 lpfc_post_buffer(phba, pring, 0, 1); 4322 lpfc_post_buffer(phba, pring, 0);
4402 return; 4323 return;
4403 } 4324 }
4404 4325
@@ -4842,18 +4763,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
4842 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 4763 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4843 unsigned long iflags; 4764 unsigned long iflags;
4844 uint32_t tmo_posted; 4765 uint32_t tmo_posted;
4766
4845 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 4767 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
4846 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 4768 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
4847 if (!tmo_posted) 4769 if (!tmo_posted)
4848 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 4770 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
4849 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 4771 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
4850 4772
4851 if (!tmo_posted) { 4773 if (!tmo_posted)
4852 spin_lock_irqsave(&phba->hbalock, iflags); 4774 lpfc_worker_wake_up(phba);
4853 if (phba->work_wait) 4775 return;
4854 lpfc_worker_wake_up(phba);
4855 spin_unlock_irqrestore(&phba->hbalock, iflags);
4856 }
4857} 4776}
4858 4777
4859static void 4778static void
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7cb68feb04fd..a98d11bf3576 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
153 * count until this queued work is done 153 * count until this queued work is done
154 */ 154 */
155 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 155 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
156 evtp->evt = LPFC_EVT_DEV_LOSS; 156 if (evtp->evt_arg1) {
157 list_add_tail(&evtp->evt_listp, &phba->work_list); 157 evtp->evt = LPFC_EVT_DEV_LOSS;
158 if (phba->work_wait) 158 list_add_tail(&evtp->evt_listp, &phba->work_list);
159 wake_up(phba->work_wait); 159 lpfc_worker_wake_up(phba);
160 160 }
161 spin_unlock_irq(&phba->hbalock); 161 spin_unlock_irq(&phba->hbalock);
162 162
163 return; 163 return;
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
276 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 276 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
277} 277}
278 278
279
280void
281lpfc_worker_wake_up(struct lpfc_hba *phba)
282{
283 wake_up(phba->work_wait);
284 return;
285}
286
287static void 279static void
288lpfc_work_list_done(struct lpfc_hba *phba) 280lpfc_work_list_done(struct lpfc_hba *phba)
289{ 281{
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
429 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { 421 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
430 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 422 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
431 pring->flag |= LPFC_DEFERRED_RING_EVENT; 423 pring->flag |= LPFC_DEFERRED_RING_EVENT;
424 /* Set the lpfc data pending flag */
425 set_bit(LPFC_DATA_READY, &phba->data_flags);
432 } else { 426 } else {
433 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 427 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
434 lpfc_sli_handle_slow_ring_event(phba, pring, 428 lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
459 lpfc_work_list_done(phba); 453 lpfc_work_list_done(phba);
460} 454}
461 455
462static int
463check_work_wait_done(struct lpfc_hba *phba)
464{
465 struct lpfc_vport *vport;
466 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
467 int rc = 0;
468
469 spin_lock_irq(&phba->hbalock);
470 list_for_each_entry(vport, &phba->port_list, listentry) {
471 if (vport->work_port_events) {
472 rc = 1;
473 break;
474 }
475 }
476 if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
477 kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
478 rc = 1;
479 phba->work_found++;
480 } else
481 phba->work_found = 0;
482 spin_unlock_irq(&phba->hbalock);
483 return rc;
484}
485
486
487int 456int
488lpfc_do_work(void *p) 457lpfc_do_work(void *p)
489{ 458{
490 struct lpfc_hba *phba = p; 459 struct lpfc_hba *phba = p;
491 int rc; 460 int rc;
492 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
493 461
494 set_user_nice(current, -20); 462 set_user_nice(current, -20);
495 phba->work_wait = &work_waitq; 463 phba->data_flags = 0;
496 phba->work_found = 0;
497 464
498 while (1) { 465 while (1) {
499 466 /* wait and check worker queue activities */
500 rc = wait_event_interruptible(work_waitq, 467 rc = wait_event_interruptible(phba->work_waitq,
501 check_work_wait_done(phba)); 468 (test_and_clear_bit(LPFC_DATA_READY,
502 469 &phba->data_flags)
470 || kthread_should_stop()));
503 BUG_ON(rc); 471 BUG_ON(rc);
504 472
505 if (kthread_should_stop()) 473 if (kthread_should_stop())
506 break; 474 break;
507 475
476 /* Handle pending lpfc data processing */
508 lpfc_work_done(phba); 477 lpfc_work_done(phba);
509
510 /* If there is alot of slow ring work, like during link up
511 * check_work_wait_done() may cause this thread to not give
512 * up the CPU for very long periods of time. This may cause
513 * soft lockups or other problems. To avoid these situations
514 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
515 * consecutive iterations.
516 */
517 if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
518 phba->work_found = 0;
519 schedule();
520 }
521 } 478 }
522 spin_lock_irq(&phba->hbalock);
523 phba->work_wait = NULL;
524 spin_unlock_irq(&phba->hbalock);
525 return 0; 479 return 0;
526} 480}
527 481
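
The consumer side collapses into a single wait_event_interruptible() whose condition is test_and_clear_bit() on the data-ready flag or kthread_should_stop(); the per-vport scan in check_work_wait_done(), the on-stack waitqueue, and the LPFC_MAX_WORKER_ITERATION yield hack all disappear. A sketch of the loop and of the setup ordering it requires, again under placeholder demo_* names:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>

static void demo_work_done(struct demo_hba *hba);   /* drains work_list */

static int demo_do_work(void *p)
{
        struct demo_hba *hba = p;
        int rc;

        set_user_nice(current, -20);
        hba->data_flags = 0;

        while (1) {
                /* Sleep until work is posted or a stop is requested;
                 * test_and_clear_bit() consumes the pending flag, so
                 * a burst of wake-ups collapses into one pass. */
                rc = wait_event_interruptible(hba->work_waitq,
                        (test_and_clear_bit(DEMO_DATA_READY,
                                            &hba->data_flags)
                         || kthread_should_stop()));
                if (rc)                 /* the driver BUG_ONs instead */
                        break;
                if (kthread_should_stop())
                        break;
                demo_work_done(hba);
        }
        return 0;
}

/* Setup order matters: the head must be live before the thread can be
 * woken, hence the init_waitqueue_head() added to lpfc_pci_probe_one():
 *
 *      init_waitqueue_head(&hba->work_waitq);
 *      worker = kthread_run(demo_do_work, hba, "demo_worker");
 */
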
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
551 505
552 spin_lock_irqsave(&phba->hbalock, flags); 506 spin_lock_irqsave(&phba->hbalock, flags);
553 list_add_tail(&evtp->evt_listp, &phba->work_list); 507 list_add_tail(&evtp->evt_listp, &phba->work_list);
554 if (phba->work_wait)
555 lpfc_worker_wake_up(phba);
556 spin_unlock_irqrestore(&phba->hbalock, flags); 508 spin_unlock_irqrestore(&phba->hbalock, flags);
557 509
510 lpfc_worker_wake_up(phba);
511
558 return 1; 512 return 1;
559} 513}
560 514
@@ -963,6 +917,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
963 if (phba->fc_topology == TOPOLOGY_LOOP) { 917 if (phba->fc_topology == TOPOLOGY_LOOP) {
964 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 918 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
965 919
920 if (phba->cfg_enable_npiv)
921 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
922 "1309 Link Up Event npiv not supported in loop "
923 "topology\n");
966 /* Get Loop Map information */ 924 /* Get Loop Map information */
967 if (la->il) 925 if (la->il)
968 vport->fc_flag |= FC_LBIT; 926 vport->fc_flag |= FC_LBIT;
@@ -1087,6 +1045,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1087 MAILBOX_t *mb = &pmb->mb; 1045 MAILBOX_t *mb = &pmb->mb;
1088 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1046 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1089 1047
1048 /* Unblock ELS traffic */
1049 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1090 /* Check for error */ 1050 /* Check for error */
1091 if (mb->mbxStatus) { 1051 if (mb->mbxStatus) {
1092 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1052 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -1650,7 +1610,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1650 ndlp->nlp_DID, old_state, state); 1610 ndlp->nlp_DID, old_state, state);
1651 1611
1652 if (old_state == NLP_STE_NPR_NODE && 1612 if (old_state == NLP_STE_NPR_NODE &&
1653 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1654 state != NLP_STE_NPR_NODE) 1613 state != NLP_STE_NPR_NODE)
1655 lpfc_cancel_retry_delay_tmo(vport, ndlp); 1614 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1656 if (old_state == NLP_STE_UNMAPPED_NODE) { 1615 if (old_state == NLP_STE_UNMAPPED_NODE) {
@@ -1687,8 +1646,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1687{ 1646{
1688 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1647 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1689 1648
1690 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1649 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1691 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1692 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 1650 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1693 lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 1651 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1694 spin_lock_irq(shost->host_lock); 1652 spin_lock_irq(shost->host_lock);
@@ -1701,8 +1659,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1701static void 1659static void
1702lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1660lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1703{ 1661{
1704 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1662 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1705 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1706 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 1663 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1707 lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 1664 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1708 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 1665 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
@@ -2121,10 +2078,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2121 ndlp->nlp_last_elscmd = 0; 2078 ndlp->nlp_last_elscmd = 0;
2122 del_timer_sync(&ndlp->nlp_delayfunc); 2079 del_timer_sync(&ndlp->nlp_delayfunc);
2123 2080
2124 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) 2081 list_del_init(&ndlp->els_retry_evt.evt_listp);
2125 list_del_init(&ndlp->els_retry_evt.evt_listp); 2082 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2126 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
2127 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2128 2083
2129 lpfc_unreg_rpi(vport, ndlp); 2084 lpfc_unreg_rpi(vport, ndlp);
2130 2085
@@ -2144,10 +2099,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2144 LPFC_MBOXQ_t *mbox; 2099 LPFC_MBOXQ_t *mbox;
2145 int rc; 2100 int rc;
2146 2101
2147 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 2102 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2148 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2149 }
2150
2151 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { 2103 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
2152 /* For this case we need to cleanup the default rpi 2104 /* For this case we need to cleanup the default rpi
2153 * allocated by the firmware. 2105 * allocated by the firmware.
@@ -2317,8 +2269,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2317 /* Since this node is marked for discovery, 2269 /* Since this node is marked for discovery,
2318 * delay timeout is not needed. 2270 * delay timeout is not needed.
2319 */ 2271 */
2320 if (ndlp->nlp_flag & NLP_DELAY_TMO) 2272 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2321 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2322 } else 2273 } else
2323 ndlp = NULL; 2274 ndlp = NULL;
2324 } else { 2275 } else {
@@ -2643,21 +2594,20 @@ lpfc_disc_timeout(unsigned long ptr)
2643{ 2594{
2644 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 2595 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2645 struct lpfc_hba *phba = vport->phba; 2596 struct lpfc_hba *phba = vport->phba;
2597 uint32_t tmo_posted;
2646 unsigned long flags = 0; 2598 unsigned long flags = 0;
2647 2599
2648 if (unlikely(!phba)) 2600 if (unlikely(!phba))
2649 return; 2601 return;
2650 2602
2651 if ((vport->work_port_events & WORKER_DISC_TMO) == 0) { 2603 spin_lock_irqsave(&vport->work_port_lock, flags);
2652 spin_lock_irqsave(&vport->work_port_lock, flags); 2604 tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
2605 if (!tmo_posted)
2653 vport->work_port_events |= WORKER_DISC_TMO; 2606 vport->work_port_events |= WORKER_DISC_TMO;
2654 spin_unlock_irqrestore(&vport->work_port_lock, flags); 2607 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2655 2608
2656 spin_lock_irqsave(&phba->hbalock, flags); 2609 if (!tmo_posted)
2657 if (phba->work_wait) 2610 lpfc_worker_wake_up(phba);
2658 lpfc_worker_wake_up(phba);
2659 spin_unlock_irqrestore(&phba->hbalock, flags);
2660 }
2661 return; 2611 return;
2662} 2612}
2663 2613
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index fa757b251f82..5b6e5395c8eb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -145,8 +145,10 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
145 return -ERESTART; 145 return -ERESTART;
146 } 146 }
147 147
148 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) 148 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
149 mempool_free(pmb, phba->mbox_mem_pool);
149 return -EINVAL; 150 return -EINVAL;
151 }
150 152
151 /* Save information as VPD data */ 153 /* Save information as VPD data */
152 vp->rev.rBit = 1; 154 vp->rev.rBit = 1;
@@ -551,18 +553,18 @@ static void
551lpfc_hb_timeout(unsigned long ptr) 553lpfc_hb_timeout(unsigned long ptr)
552{ 554{
553 struct lpfc_hba *phba; 555 struct lpfc_hba *phba;
556 uint32_t tmo_posted;
554 unsigned long iflag; 557 unsigned long iflag;
555 558
556 phba = (struct lpfc_hba *)ptr; 559 phba = (struct lpfc_hba *)ptr;
557 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 560 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
558 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) 561 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
562 if (!tmo_posted)
559 phba->pport->work_port_events |= WORKER_HB_TMO; 563 phba->pport->work_port_events |= WORKER_HB_TMO;
560 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 564 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
561 565
562 spin_lock_irqsave(&phba->hbalock, iflag); 566 if (!tmo_posted)
563 if (phba->work_wait) 567 lpfc_worker_wake_up(phba);
564 wake_up(phba->work_wait);
565 spin_unlock_irqrestore(&phba->hbalock, iflag);
566 return; 568 return;
567} 569}
568 570
@@ -851,6 +853,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)
851 lpfc_read_la(phba, pmb, mp); 853 lpfc_read_la(phba, pmb, mp);
852 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 854 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
853 pmb->vport = vport; 855 pmb->vport = vport;
856 /* Block ELS IOCBs until we have processed this mbox command */
857 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
854 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 858 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
855 if (rc == MBX_NOT_FINISHED) { 859 if (rc == MBX_NOT_FINISHED) {
856 rc = 4; 860 rc = 4;
@@ -866,6 +870,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
866 return; 870 return;
867 871
868lpfc_handle_latt_free_mbuf: 872lpfc_handle_latt_free_mbuf:
873 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
869 lpfc_mbuf_free(phba, mp->virt, mp->phys); 874 lpfc_mbuf_free(phba, mp->virt, mp->phys);
870lpfc_handle_latt_free_mp: 875lpfc_handle_latt_free_mp:
871 kfree(mp); 876 kfree(mp);
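
Together with the lpfc_mbx_cmpl_read_la() hunk earlier, lpfc_handle_latt() now brackets the READ_LA mailbox with LPFC_STOP_IOCB_EVENT: ELS slow-path processing is blocked before the command is issued, the completion handler unblocks it, and the error path unblocks it by hand. A sketch of the quiesce/issue/resume shape, assuming demo_hba also carries an els_ring_flag word (placeholder names):

#define DEMO_STOP_IOCB_EVENT 0x020

static int demo_issue_mbox_nowait(struct demo_hba *hba);

static int demo_handle_link_attention(struct demo_hba *hba)
{
        int rc;

        /* Quiesce ELS slow-path processing before READ_LA goes out. */
        hba->els_ring_flag |= DEMO_STOP_IOCB_EVENT;
        rc = demo_issue_mbox_nowait(hba);
        if (rc) {
                /* Error path: the completion will never run, so
                 * resume ELS processing here. */
                hba->els_ring_flag &= ~DEMO_STOP_IOCB_EVENT;
                return rc;
        }
        /* Success: the mailbox completion handler clears the flag. */
        return 0;
}
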
@@ -1194,8 +1199,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1194/* Returns the number of buffers NOT posted. */ 1199/* Returns the number of buffers NOT posted. */
1195/**************************************************/ 1200/**************************************************/
1196int 1201int
1197lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt, 1202lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1198 int type)
1199{ 1203{
1200 IOCB_t *icmd; 1204 IOCB_t *icmd;
1201 struct lpfc_iocbq *iocb; 1205 struct lpfc_iocbq *iocb;
@@ -1295,7 +1299,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
1295 struct lpfc_sli *psli = &phba->sli; 1299 struct lpfc_sli *psli = &phba->sli;
1296 1300
1297 /* Ring 0, ELS / CT buffers */ 1301 /* Ring 0, ELS / CT buffers */
1298 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1); 1302 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1299 /* Ring 2 - FCP no buffers needed */ 1303 /* Ring 2 - FCP no buffers needed */
1300 1304
1301 return 0; 1305 return 0;
@@ -1454,6 +1458,15 @@ lpfc_cleanup(struct lpfc_vport *vport)
1454 1458
1455 lpfc_disc_state_machine(vport, ndlp, NULL, 1459 lpfc_disc_state_machine(vport, ndlp, NULL,
1456 NLP_EVT_DEVICE_RM); 1460 NLP_EVT_DEVICE_RM);
1461
1462 /* An ndlp with nlp_type, nlp_flag and nlp_state all zero
1463 * is not a defined combination; it results when an
1464 * initiator has logged into us,
1465 * so clean up this ndlp.
1466 */
1467 if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) &&
1468 (ndlp->nlp_state == 0))
1469 lpfc_nlp_put(ndlp);
1457 } 1470 }
1458 1471
1459 /* At this point, ALL ndlp's should be gone 1472 /* At this point, ALL ndlp's should be gone
@@ -2101,6 +2114,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2101 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); 2114 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
2102 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 2115 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
2103 2116
2117 /* Initialize the wait queue head for the kernel thread */
2118 init_waitqueue_head(&phba->work_waitq);
2119
2104 /* Startup the kernel thread for this host adapter. */ 2120 /* Startup the kernel thread for this host adapter. */
2105 phba->worker_thread = kthread_run(lpfc_do_work, phba, 2121 phba->worker_thread = kthread_run(lpfc_do_work, phba,
2106 "lpfc_worker_%d", phba->brd_no); 2122 "lpfc_worker_%d", phba->brd_no);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d08c4c890744..6688a8689b56 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -235,10 +235,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
235 (iocb->iocb_cmpl) (phba, iocb, iocb); 235 (iocb->iocb_cmpl) (phba, iocb, iocb);
236 } 236 }
237 } 237 }
238 238 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
239 /* If we are delaying issuing an ELS command, cancel it */
240 if (ndlp->nlp_flag & NLP_DELAY_TMO)
241 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
242 return 0; 239 return 0;
243} 240}
244 241
@@ -249,7 +246,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
249 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 246 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
250 struct lpfc_hba *phba = vport->phba; 247 struct lpfc_hba *phba = vport->phba;
251 struct lpfc_dmabuf *pcmd; 248 struct lpfc_dmabuf *pcmd;
252 struct lpfc_work_evt *evtp;
253 uint32_t *lp; 249 uint32_t *lp;
254 IOCB_t *icmd; 250 IOCB_t *icmd;
255 struct serv_parm *sp; 251 struct serv_parm *sp;
@@ -425,73 +421,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
425 ndlp, mbox); 421 ndlp, mbox);
426 return 1; 422 return 1;
427 } 423 }
428
429 /* If the remote NPort logs into us, before we can initiate
430 * discovery to them, cleanup the NPort from discovery accordingly.
431 */
432 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
433 spin_lock_irq(shost->host_lock);
434 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
435 spin_unlock_irq(shost->host_lock);
436 del_timer_sync(&ndlp->nlp_delayfunc);
437 ndlp->nlp_last_elscmd = 0;
438
439 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) {
440 list_del_init(&ndlp->els_retry_evt.evt_listp);
441 /* Decrement ndlp reference count held for the
442 * delayed retry
443 */
444 evtp = &ndlp->els_retry_evt;
445 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
446 }
447
448 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
449 spin_lock_irq(shost->host_lock);
450 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
451 spin_unlock_irq(shost->host_lock);
452
453 if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
454 (vport->num_disc_nodes)) {
455 /* Check to see if there are more
456 * ADISCs to be sent
457 */
458 lpfc_more_adisc(vport);
459
460 if ((vport->num_disc_nodes == 0) &&
461 (vport->fc_npr_cnt))
462 lpfc_els_disc_plogi(vport);
463
464 if (vport->num_disc_nodes == 0) {
465 spin_lock_irq(shost->host_lock);
466 vport->fc_flag &= ~FC_NDISC_ACTIVE;
467 spin_unlock_irq(shost->host_lock);
468 lpfc_can_disctmo(vport);
469 lpfc_end_rscn(vport);
470 }
471 }
472 }
473 } else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
474 (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
475 (vport->num_disc_nodes)) {
476 spin_lock_irq(shost->host_lock);
477 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
478 spin_unlock_irq(shost->host_lock);
479 /* Check to see if there are more
480 * PLOGIs to be sent
481 */
482 lpfc_more_plogi(vport);
483 if (vport->num_disc_nodes == 0) {
484 spin_lock_irq(shost->host_lock);
485 vport->fc_flag &= ~FC_NDISC_ACTIVE;
486 spin_unlock_irq(shost->host_lock);
487 lpfc_can_disctmo(vport);
488 lpfc_end_rscn(vport);
489 }
490 }
491
492 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); 424 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
493 return 1; 425 return 1;
494
495out: 426out:
496 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 427 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
497 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; 428 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
@@ -574,7 +505,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
574 else 505 else
575 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 506 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
576 507
577 if (!(ndlp->nlp_type & NLP_FABRIC) || 508 if ((!(ndlp->nlp_type & NLP_FABRIC) &&
509 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
510 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
578 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 511 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
579 /* Only try to re-login if this is NOT a Fabric Node */ 512 /* Only try to re-login if this is NOT a Fabric Node */
580 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 513 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
@@ -751,6 +684,7 @@ static uint32_t
751lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 684lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
752 void *arg, uint32_t evt) 685 void *arg, uint32_t evt)
753{ 686{
687 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
754 struct lpfc_hba *phba = vport->phba; 688 struct lpfc_hba *phba = vport->phba;
755 struct lpfc_iocbq *cmdiocb = arg; 689 struct lpfc_iocbq *cmdiocb = arg;
756 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 690 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -776,7 +710,22 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
776 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 710 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
777 NULL); 711 NULL);
778 } else { 712 } else {
779 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 713 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
714 (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
715 (vport->num_disc_nodes)) {
716 spin_lock_irq(shost->host_lock);
717 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
718 spin_unlock_irq(shost->host_lock);
719 /* Check if there are more PLOGIs to be sent */
720 lpfc_more_plogi(vport);
721 if (vport->num_disc_nodes == 0) {
722 spin_lock_irq(shost->host_lock);
723 vport->fc_flag &= ~FC_NDISC_ACTIVE;
724 spin_unlock_irq(shost->host_lock);
725 lpfc_can_disctmo(vport);
726 lpfc_end_rscn(vport);
727 }
728 }
780 } /* If our portname was less */ 729 } /* If our portname was less */
781 730
782 return ndlp->nlp_state; 731 return ndlp->nlp_state;
@@ -1040,6 +989,7 @@ static uint32_t
1040lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 989lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1041 void *arg, uint32_t evt) 990 void *arg, uint32_t evt)
1042{ 991{
992 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1043 struct lpfc_hba *phba = vport->phba; 993 struct lpfc_hba *phba = vport->phba;
1044 struct lpfc_iocbq *cmdiocb; 994 struct lpfc_iocbq *cmdiocb;
1045 995
@@ -1048,9 +998,28 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1048 998
1049 cmdiocb = (struct lpfc_iocbq *) arg; 999 cmdiocb = (struct lpfc_iocbq *) arg;
1050 1000
1051 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) 1001 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1052 return ndlp->nlp_state; 1002 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1003 spin_lock_irq(shost->host_lock);
1004 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1005 spin_unlock_irq(shost->host_lock);
1053 1006
1007 if (vport->num_disc_nodes) {
1008 lpfc_more_adisc(vport);
1009 if ((vport->num_disc_nodes == 0) &&
1010 (vport->fc_npr_cnt))
1011 lpfc_els_disc_plogi(vport);
1012 if (vport->num_disc_nodes == 0) {
1013 spin_lock_irq(shost->host_lock);
1014 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1015 spin_unlock_irq(shost->host_lock);
1016 lpfc_can_disctmo(vport);
1017 lpfc_end_rscn(vport);
1018 }
1019 }
1020 }
1021 return ndlp->nlp_state;
1022 }
1054 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1023 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1055 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 1024 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1056 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1025 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -1742,24 +1711,21 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1742 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1711 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1743 1712
1744 /* Ignore PLOGI if we have an outstanding LOGO */ 1713 /* Ignore PLOGI if we have an outstanding LOGO */
1745 if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) { 1714 if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
1746 return ndlp->nlp_state; 1715 return ndlp->nlp_state;
1747 }
1748
1749 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { 1716 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1717 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1750 spin_lock_irq(shost->host_lock); 1718 spin_lock_irq(shost->host_lock);
1751 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1719 ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
1752 spin_unlock_irq(shost->host_lock); 1720 spin_unlock_irq(shost->host_lock);
1753 return ndlp->nlp_state; 1721 } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1754 } 1722 /* send PLOGI immediately, move to PLOGI issue state */
1755 1723 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1756 /* send PLOGI immediately, move to PLOGI issue state */ 1724 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1757 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1725 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1758 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1726 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1759 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1727 }
1760 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1761 } 1728 }
1762
1763 return ndlp->nlp_state; 1729 return ndlp->nlp_state;
1764} 1730}
1765 1731
@@ -1810,7 +1776,6 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1810 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1776 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1811 1777
1812 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1778 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1813
1814 /* 1779 /*
1815 * Do not start discovery if discovery is about to start 1780 * Do not start discovery if discovery is about to start
1816 * or discovery in progress for this node. Starting discovery 1781 * or discovery in progress for this node. Starting discovery
@@ -1973,9 +1938,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1973 spin_lock_irq(shost->host_lock); 1938 spin_lock_irq(shost->host_lock);
1974 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1939 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1975 spin_unlock_irq(shost->host_lock); 1940 spin_unlock_irq(shost->host_lock);
1976 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 1941 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1977 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1978 }
1979 return ndlp->nlp_state; 1942 return ndlp->nlp_state;
1980} 1943}
1981 1944
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 0910a9ab76a5..c94da4f2b8a6 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -50,6 +50,7 @@ void
50lpfc_adjust_queue_depth(struct lpfc_hba *phba) 50lpfc_adjust_queue_depth(struct lpfc_hba *phba)
51{ 51{
52 unsigned long flags; 52 unsigned long flags;
53 uint32_t evt_posted;
53 54
54 spin_lock_irqsave(&phba->hbalock, flags); 55 spin_lock_irqsave(&phba->hbalock, flags);
55 atomic_inc(&phba->num_rsrc_err); 56 atomic_inc(&phba->num_rsrc_err);
@@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
65 spin_unlock_irqrestore(&phba->hbalock, flags); 66 spin_unlock_irqrestore(&phba->hbalock, flags);
66 67
67 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 68 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
68 if ((phba->pport->work_port_events & 69 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
69 WORKER_RAMP_DOWN_QUEUE) == 0) { 70 if (!evt_posted)
70 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; 71 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
71 }
72 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 72 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
73 73
74 spin_lock_irqsave(&phba->hbalock, flags); 74 if (!evt_posted)
75 if (phba->work_wait) 75 lpfc_worker_wake_up(phba);
76 wake_up(phba->work_wait);
77 spin_unlock_irqrestore(&phba->hbalock, flags);
78
79 return; 76 return;
80} 77}
81 78
@@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
89{ 86{
90 unsigned long flags; 87 unsigned long flags;
91 struct lpfc_hba *phba = vport->phba; 88 struct lpfc_hba *phba = vport->phba;
89 uint32_t evt_posted;
92 atomic_inc(&phba->num_cmd_success); 90 atomic_inc(&phba->num_cmd_success);
93 91
94 if (vport->cfg_lun_queue_depth <= sdev->queue_depth) 92 if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
@@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
103 spin_unlock_irqrestore(&phba->hbalock, flags); 101 spin_unlock_irqrestore(&phba->hbalock, flags);
104 102
105 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 103 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
106 if ((phba->pport->work_port_events & 104 evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
107 WORKER_RAMP_UP_QUEUE) == 0) { 105 if (!evt_posted)
108 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE; 106 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
109 }
110 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 107 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
111 108
112 spin_lock_irqsave(&phba->hbalock, flags); 109 if (!evt_posted)
113 if (phba->work_wait) 110 lpfc_worker_wake_up(phba);
114 wake_up(phba->work_wait); 111 return;
115 spin_unlock_irqrestore(&phba->hbalock, flags);
116} 112}
117 113
118void 114void
@@ -609,9 +605,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
609 result = cmd->result; 605 result = cmd->result;
610 sdev = cmd->device; 606 sdev = cmd->device;
611 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 607 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
612 spin_lock_irqsave(sdev->host->host_lock, flags);
613 lpfc_cmd->pCmd = NULL; /* This must be done before scsi_done */
614 spin_unlock_irqrestore(sdev->host->host_lock, flags);
615 cmd->scsi_done(cmd); 608 cmd->scsi_done(cmd);
616 609
617 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 610 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -620,6 +613,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
620 * wake up the thread. 613 * wake up the thread.
621 */ 614 */
622 spin_lock_irqsave(sdev->host->host_lock, flags); 615 spin_lock_irqsave(sdev->host->host_lock, flags);
616 lpfc_cmd->pCmd = NULL;
623 if (lpfc_cmd->waitq) 617 if (lpfc_cmd->waitq)
624 wake_up(lpfc_cmd->waitq); 618 wake_up(lpfc_cmd->waitq);
625 spin_unlock_irqrestore(sdev->host->host_lock, flags); 619 spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -690,6 +684,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
690 * wake up the thread. 684 * wake up the thread.
691 */ 685 */
692 spin_lock_irqsave(sdev->host->host_lock, flags); 686 spin_lock_irqsave(sdev->host->host_lock, flags);
687 lpfc_cmd->pCmd = NULL;
693 if (lpfc_cmd->waitq) 688 if (lpfc_cmd->waitq)
694 wake_up(lpfc_cmd->waitq); 689 wake_up(lpfc_cmd->waitq);
695 spin_unlock_irqrestore(sdev->host->host_lock, flags); 690 spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -849,14 +844,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
849 struct lpfc_iocbq *iocbq; 844 struct lpfc_iocbq *iocbq;
850 struct lpfc_iocbq *iocbqrsp; 845 struct lpfc_iocbq *iocbqrsp;
851 int ret; 846 int ret;
847 int status;
852 848
853 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) 849 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
854 return FAILED; 850 return FAILED;
855 851
856 lpfc_cmd->rdata = rdata; 852 lpfc_cmd->rdata = rdata;
857 ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, 853 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
858 FCP_TARGET_RESET); 854 FCP_TARGET_RESET);
859 if (!ret) 855 if (!status)
860 return FAILED; 856 return FAILED;
861 857
862 iocbq = &lpfc_cmd->cur_iocbq; 858 iocbq = &lpfc_cmd->cur_iocbq;
@@ -869,12 +865,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
869 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 865 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
870 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", 866 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
871 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); 867 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
872 ret = lpfc_sli_issue_iocb_wait(phba, 868 status = lpfc_sli_issue_iocb_wait(phba,
873 &phba->sli.ring[phba->sli.fcp_ring], 869 &phba->sli.ring[phba->sli.fcp_ring],
874 iocbq, iocbqrsp, lpfc_cmd->timeout); 870 iocbq, iocbqrsp, lpfc_cmd->timeout);
875 if (ret != IOCB_SUCCESS) { 871 if (status != IOCB_SUCCESS) {
876 if (ret == IOCB_TIMEDOUT) 872 if (status == IOCB_TIMEDOUT) {
877 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 873 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
874 ret = TIMEOUT_ERROR;
875 } else
876 ret = FAILED;
878 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 877 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
879 } else { 878 } else {
880 ret = SUCCESS; 879 ret = SUCCESS;
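
lpfc_scsi_tgt_reset() now keeps the raw IOCB status in a local "status" and maps it onto the midlayer's error-handler verdicts separately, so TIMEOUT_ERROR can be reported while the timed-out iocb is handed a self-cleaning completion. A sketch of that mapping; demo_* names and constants are placeholders, and SUCCESS, FAILED and TIMEOUT_ERROR are the SCSI midlayer's error-handler return codes:

#define DEMO_IOCB_SUCCESS 0
#define DEMO_IOCB_TIMEDOUT 6

struct demo_iocb {
        void (*iocb_cmpl)(struct demo_iocb *iocbq);
};

static void demo_def_cmpl(struct demo_iocb *iocbq);  /* frees resources */

static int demo_map_iocb_status(struct demo_iocb *iocbq, int status)
{
        if (status == DEMO_IOCB_TIMEDOUT) {
                /* Let the late completion clean up after itself. */
                iocbq->iocb_cmpl = demo_def_cmpl;
                return TIMEOUT_ERROR;
        }
        return (status == DEMO_IOCB_SUCCESS) ? SUCCESS : FAILED;
}
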
@@ -1142,121 +1141,96 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1142 struct lpfc_iocbq *iocbq, *iocbqrsp; 1141 struct lpfc_iocbq *iocbq, *iocbqrsp;
1143 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 1142 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1144 struct lpfc_nodelist *pnode = rdata->pnode; 1143 struct lpfc_nodelist *pnode = rdata->pnode;
1145 uint32_t cmd_result = 0, cmd_status = 0; 1144 unsigned long later;
1146 int ret = FAILED; 1145 int ret = SUCCESS;
1147 int iocb_status = IOCB_SUCCESS; 1146 int status;
1148 int cnt, loopcnt; 1147 int cnt;
1149 1148
1150 lpfc_block_error_handler(cmnd); 1149 lpfc_block_error_handler(cmnd);
1151 loopcnt = 0;
1152 /* 1150 /*
1153 * If target is not in a MAPPED state, delay the reset until 1151 * If target is not in a MAPPED state, delay the reset until
1154 * target is rediscovered or devloss timeout expires. 1152 * target is rediscovered or devloss timeout expires.
1155 */ 1153 */
1156 while (1) { 1154 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1155 while (time_after(later, jiffies)) {
1157 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 1156 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1158 goto out; 1157 return FAILED;
1159
1160 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1161 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1162 loopcnt++;
1163 rdata = cmnd->device->hostdata;
1164 if (!rdata ||
1165 (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
1166 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1167 "0721 LUN Reset rport "
1168 "failure: cnt x%x rdata x%p\n",
1169 loopcnt, rdata);
1170 goto out;
1171 }
1172 pnode = rdata->pnode;
1173 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1174 goto out;
1175 }
1176 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 1158 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
1177 break; 1159 break;
1160 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1161 rdata = cmnd->device->hostdata;
1162 if (!rdata)
1163 break;
1164 pnode = rdata->pnode;
1165 }
1166 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1167 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1168 "0721 LUN Reset rport "
1169 "failure: msec x%x rdata x%p\n",
1170 jiffies_to_msecs(jiffies - later), rdata);
1171 return FAILED;
1178 } 1172 }
1179
1180 lpfc_cmd = lpfc_get_scsi_buf(phba); 1173 lpfc_cmd = lpfc_get_scsi_buf(phba);
1181 if (lpfc_cmd == NULL) 1174 if (lpfc_cmd == NULL)
1182 goto out; 1175 return FAILED;
1183
1184 lpfc_cmd->timeout = 60; 1176 lpfc_cmd->timeout = 60;
1185 lpfc_cmd->rdata = rdata; 1177 lpfc_cmd->rdata = rdata;
1186 1178
1187 ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun, 1179 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
1188 FCP_TARGET_RESET); 1180 cmnd->device->lun,
1189 if (!ret) 1181 FCP_TARGET_RESET);
1190 goto out_free_scsi_buf; 1182 if (!status) {
1191 1183 lpfc_release_scsi_buf(phba, lpfc_cmd);
1184 return FAILED;
1185 }
1192 iocbq = &lpfc_cmd->cur_iocbq; 1186 iocbq = &lpfc_cmd->cur_iocbq;
1193 1187
1194 /* get a buffer for this IOCB command response */ 1188 /* get a buffer for this IOCB command response */
1195 iocbqrsp = lpfc_sli_get_iocbq(phba); 1189 iocbqrsp = lpfc_sli_get_iocbq(phba);
1196 if (iocbqrsp == NULL) 1190 if (iocbqrsp == NULL) {
1197 goto out_free_scsi_buf; 1191 lpfc_release_scsi_buf(phba, lpfc_cmd);
1198 1192 return FAILED;
1193 }
1199 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 1194 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1200 "0703 Issue target reset to TGT %d LUN %d " 1195 "0703 Issue target reset to TGT %d LUN %d "
1201 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 1196 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
1202 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 1197 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1203 iocb_status = lpfc_sli_issue_iocb_wait(phba, 1198 status = lpfc_sli_issue_iocb_wait(phba,
1204 &phba->sli.ring[phba->sli.fcp_ring], 1199 &phba->sli.ring[phba->sli.fcp_ring],
1205 iocbq, iocbqrsp, lpfc_cmd->timeout); 1200 iocbq, iocbqrsp, lpfc_cmd->timeout);
1206 1201 if (status == IOCB_TIMEDOUT) {
1207 if (iocb_status == IOCB_TIMEDOUT)
1208 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 1202 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
1209 1203 ret = TIMEOUT_ERROR;
1210 if (iocb_status == IOCB_SUCCESS) 1204 } else {
1211 ret = SUCCESS; 1205 if (status != IOCB_SUCCESS)
1212 else 1206 ret = FAILED;
1213 ret = iocb_status; 1207 lpfc_release_scsi_buf(phba, lpfc_cmd);
1214 1208 }
1215 cmd_result = iocbqrsp->iocb.un.ulpWord[4]; 1209 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1216 cmd_status = iocbqrsp->iocb.ulpStatus; 1210 "0713 SCSI layer issued device reset (%d, %d) "
1217 1211 "return x%x status x%x result x%x\n",
1212 cmnd->device->id, cmnd->device->lun, ret,
1213 iocbqrsp->iocb.ulpStatus,
1214 iocbqrsp->iocb.un.ulpWord[4]);
1218 lpfc_sli_release_iocbq(phba, iocbqrsp); 1215 lpfc_sli_release_iocbq(phba, iocbqrsp);
1219
1220 /*
1221 * All outstanding txcmplq I/Os should have been aborted by the device.
1222 * Unfortunately, some targets do not abide by this, forcing the driver
1223 * to double check.
1224 */
1225 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun, 1216 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
1226 LPFC_CTX_LUN); 1217 LPFC_CTX_TGT);
1227 if (cnt) 1218 if (cnt)
1228 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 1219 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1229 cmnd->device->id, cmnd->device->lun, 1220 cmnd->device->id, cmnd->device->lun,
1230 LPFC_CTX_LUN); 1221 LPFC_CTX_TGT);
1231 loopcnt = 0; 1222 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1232 while(cnt) { 1223 while (time_after(later, jiffies) && cnt) {
1233 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); 1224 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
1234
1235 if (++loopcnt
1236 > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1237 break;
1238
1239 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, 1225 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
1240 cmnd->device->lun, LPFC_CTX_LUN); 1226 cmnd->device->lun, LPFC_CTX_TGT);
1241 } 1227 }
1242
1243 if (cnt) { 1228 if (cnt) {
1244 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1229 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1245 "0719 device reset I/O flush failure: " 1230 "0719 device reset I/O flush failure: "
1246 "cnt x%x\n", cnt); 1231 "cnt x%x\n", cnt);
1247 ret = FAILED; 1232 ret = FAILED;
1248 } 1233 }
1249
1250out_free_scsi_buf:
1251 if (iocb_status != IOCB_TIMEDOUT) {
1252 lpfc_release_scsi_buf(phba, lpfc_cmd);
1253 }
1254 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1255 "0713 SCSI layer issued device reset (%d, %d) "
1256 "return x%x status x%x result x%x\n",
1257 cmnd->device->id, cmnd->device->lun, ret,
1258 cmd_status, cmd_result);
1259out:
1260 return ret; 1234 return ret;
1261} 1235}
1262 1236
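The device-reset hunk above replaces the old fixed-iteration polling (loopcnt counted against LPFC_RESET_WAIT) with an absolute jiffies deadline, which keeps the total wait bounded no matter how long each sleep actually takes. A minimal sketch of the idiom, assuming a caller-supplied timeout in milliseconds and a hypothetical pending_work() predicate standing in for lpfc_sli_sum_iocb():

    /* Bounded polling against a jiffies deadline (2.6.26-era APIs). */
    unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

    while (time_after(deadline, jiffies) && pending_work()) {
            /* sleep uninterruptibly between polls */
            schedule_timeout_uninterruptible(msecs_to_jiffies(20));
    }

time_after() handles jiffies wraparound, which the old loop-counter arithmetic only approximated.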
@@ -1268,19 +1242,12 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1268 struct lpfc_hba *phba = vport->phba; 1242 struct lpfc_hba *phba = vport->phba;
1269 struct lpfc_nodelist *ndlp = NULL; 1243 struct lpfc_nodelist *ndlp = NULL;
1270 int match; 1244 int match;
1271 int ret = FAILED, i, err_count = 0; 1245 int ret = SUCCESS, status, i;
1272 int cnt, loopcnt; 1246 int cnt;
1273 struct lpfc_scsi_buf * lpfc_cmd; 1247 struct lpfc_scsi_buf * lpfc_cmd;
1248 unsigned long later;
1274 1249
1275 lpfc_block_error_handler(cmnd); 1250 lpfc_block_error_handler(cmnd);
1276
1277 lpfc_cmd = lpfc_get_scsi_buf(phba);
1278 if (lpfc_cmd == NULL)
1279 goto out;
1280
1281 /* The lpfc_cmd storage is reused. Set all loop invariants. */
1282 lpfc_cmd->timeout = 60;
1283
1284 /* 1251 /*
1285 * Since the driver manages a single bus device, reset all 1252 * Since the driver manages a single bus device, reset all
1286 * targets known to the driver. Should any target reset 1253 * targets known to the driver. Should any target reset
@@ -1294,7 +1261,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1294 if (!NLP_CHK_NODE_ACT(ndlp)) 1261 if (!NLP_CHK_NODE_ACT(ndlp))
1295 continue; 1262 continue;
1296 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 1263 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1297 i == ndlp->nlp_sid && 1264 ndlp->nlp_sid == i &&
1298 ndlp->rport) { 1265 ndlp->rport) {
1299 match = 1; 1266 match = 1;
1300 break; 1267 break;
@@ -1303,27 +1270,22 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1303 spin_unlock_irq(shost->host_lock); 1270 spin_unlock_irq(shost->host_lock);
1304 if (!match) 1271 if (!match)
1305 continue; 1272 continue;
1306 1273 lpfc_cmd = lpfc_get_scsi_buf(phba);
1307 ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, 1274 if (lpfc_cmd) {
1308 cmnd->device->lun, 1275 lpfc_cmd->timeout = 60;
1309 ndlp->rport->dd_data); 1276 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
1310 if (ret != SUCCESS) { 1277 cmnd->device->lun,
1278 ndlp->rport->dd_data);
1279 if (status != TIMEOUT_ERROR)
1280 lpfc_release_scsi_buf(phba, lpfc_cmd);
1281 }
1282 if (!lpfc_cmd || status != SUCCESS) {
1311 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1283 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1312 "0700 Bus Reset on target %d failed\n", 1284 "0700 Bus Reset on target %d failed\n",
1313 i); 1285 i);
1314 err_count++; 1286 ret = FAILED;
1315 break;
1316 } 1287 }
1317 } 1288 }
1318
1319 if (ret != IOCB_TIMEDOUT)
1320 lpfc_release_scsi_buf(phba, lpfc_cmd);
1321
1322 if (err_count == 0)
1323 ret = SUCCESS;
1324 else
1325 ret = FAILED;
1326
1327 /* 1289 /*
1328 * All outstanding txcmplq I/Os should have been aborted by 1290 * All outstanding txcmplq I/Os should have been aborted by
1329 * the targets. Unfortunately, some targets do not abide by 1291 * the targets. Unfortunately, some targets do not abide by
@@ -1333,27 +1295,19 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1333 if (cnt) 1295 if (cnt)
1334 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 1296 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1335 0, 0, LPFC_CTX_HOST); 1297 0, 0, LPFC_CTX_HOST);
1336 loopcnt = 0; 1298 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1337 while(cnt) { 1299 while (time_after(later, jiffies) && cnt) {
1338 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); 1300 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
1339
1340 if (++loopcnt
1341 > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1342 break;
1343
1344 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); 1301 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
1345 } 1302 }
1346
1347 if (cnt) { 1303 if (cnt) {
1348 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1304 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1349 "0715 Bus Reset I/O flush failure: " 1305 "0715 Bus Reset I/O flush failure: "
1350 "cnt x%x left x%x\n", cnt, i); 1306 "cnt x%x left x%x\n", cnt, i);
1351 ret = FAILED; 1307 ret = FAILED;
1352 } 1308 }
1353
1354 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1309 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1355 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 1310 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
1356out:
1357 return ret; 1311 return ret;
1358} 1312}
1359 1313
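Both reset handlers above now follow one ownership rule for the task-management buffer: it may only be released when no completion can still arrive. A condensed sketch of that rule, using names taken from the diff:

    status = lpfc_sli_issue_iocb_wait(phba, pring, iocbq, iocbqrsp,
                                      lpfc_cmd->timeout);
    if (status == IOCB_TIMEDOUT) {
            /* firmware may still complete this IOCB later; hand the
             * buffer to a default completion that frees it then */
            iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
            ret = TIMEOUT_ERROR;
    } else {
            if (status != IOCB_SUCCESS)
                    ret = FAILED;
            /* no completion outstanding; safe to free now */
            lpfc_release_scsi_buf(phba, lpfc_cmd);
    }

The same rule explains why the bus-reset loop tests status != TIMEOUT_ERROR before releasing each per-target buffer.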
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 70a0a9eab211..f40aa7b905f7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -324,9 +324,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
324 phba->work_ha |= HA_ERATT; 324 phba->work_ha |= HA_ERATT;
325 phba->work_hs = HS_FFER3; 325 phba->work_hs = HS_FFER3;
326 326
327 /* hbalock should already be held */ 327 lpfc_worker_wake_up(phba);
328 if (phba->work_wait)
329 lpfc_worker_wake_up(phba);
330 328
331 return NULL; 329 return NULL;
332 } 330 }
@@ -1309,9 +1307,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1309 phba->work_ha |= HA_ERATT; 1307 phba->work_ha |= HA_ERATT;
1310 phba->work_hs = HS_FFER3; 1308 phba->work_hs = HS_FFER3;
1311 1309
1312 /* hbalock should already be held */ 1310 lpfc_worker_wake_up(phba);
1313 if (phba->work_wait)
1314 lpfc_worker_wake_up(phba);
1315 1311
1316 return; 1312 return;
1317} 1313}
@@ -2611,12 +2607,9 @@ lpfc_mbox_timeout(unsigned long ptr)
2611 phba->pport->work_port_events |= WORKER_MBOX_TMO; 2607 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2612 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 2608 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2613 2609
2614 if (!tmo_posted) { 2610 if (!tmo_posted)
2615 spin_lock_irqsave(&phba->hbalock, iflag); 2611 lpfc_worker_wake_up(phba);
2616 if (phba->work_wait) 2612 return;
2617 lpfc_worker_wake_up(phba);
2618 spin_unlock_irqrestore(&phba->hbalock, iflag);
2619 }
2620} 2613}
2621 2614
2622void 2615void
@@ -3374,8 +3367,12 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
3374 for (i = 0; i < psli->num_rings; i++) { 3367 for (i = 0; i < psli->num_rings; i++) {
3375 pring = &psli->ring[i]; 3368 pring = &psli->ring[i];
3376 prev_pring_flag = pring->flag; 3369 prev_pring_flag = pring->flag;
3377 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ 3370 /* Only slow rings */
3371 if (pring->ringno == LPFC_ELS_RING) {
3378 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3372 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3373 /* Set the lpfc data pending flag */
3374 set_bit(LPFC_DATA_READY, &phba->data_flags);
3375 }
3379 /* 3376 /*
3380 * Error everything on the txq since these iocbs have not been 3377 * Error everything on the txq since these iocbs have not been
3381 * given to the FW yet. 3378 * given to the FW yet.
@@ -3434,8 +3431,12 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3434 spin_lock_irqsave(&phba->hbalock, flags); 3431 spin_lock_irqsave(&phba->hbalock, flags);
3435 for (i = 0; i < psli->num_rings; i++) { 3432 for (i = 0; i < psli->num_rings; i++) {
3436 pring = &psli->ring[i]; 3433 pring = &psli->ring[i];
3437 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ 3434 /* Only slow rings */
3435 if (pring->ringno == LPFC_ELS_RING) {
3438 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3436 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3437 /* Set the lpfc data pending flag */
3438 set_bit(LPFC_DATA_READY, &phba->data_flags);
3439 }
3439 3440
3440 /* 3441 /*
3441 * Error everything on the txq since these iocbs have not been 3442 * Error everything on the txq since these iocbs have not been
@@ -3762,7 +3763,6 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3762 lpfc_ctx_cmd ctx_cmd) 3763 lpfc_ctx_cmd ctx_cmd)
3763{ 3764{
3764 struct lpfc_scsi_buf *lpfc_cmd; 3765 struct lpfc_scsi_buf *lpfc_cmd;
3765 struct scsi_cmnd *cmnd;
3766 int rc = 1; 3766 int rc = 1;
3767 3767
3768 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 3768 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
@@ -3772,19 +3772,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3772 return rc; 3772 return rc;
3773 3773
3774 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 3774 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
3775 cmnd = lpfc_cmd->pCmd;
3776 3775
3777 if (cmnd == NULL) 3776 if (lpfc_cmd->pCmd == NULL)
3778 return rc; 3777 return rc;
3779 3778
3780 switch (ctx_cmd) { 3779 switch (ctx_cmd) {
3781 case LPFC_CTX_LUN: 3780 case LPFC_CTX_LUN:
3782 if ((cmnd->device->id == tgt_id) && 3781 if ((lpfc_cmd->rdata->pnode) &&
3783 (cmnd->device->lun == lun_id)) 3782 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
3783 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
3784 rc = 0; 3784 rc = 0;
3785 break; 3785 break;
3786 case LPFC_CTX_TGT: 3786 case LPFC_CTX_TGT:
3787 if (cmnd->device->id == tgt_id) 3787 if ((lpfc_cmd->rdata->pnode) &&
3788 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
3788 rc = 0; 3789 rc = 0;
3789 break; 3790 break;
3790 case LPFC_CTX_HOST: 3791 case LPFC_CTX_HOST:
@@ -3994,6 +3995,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3994 if (pmboxq->context1) 3995 if (pmboxq->context1)
3995 return MBX_NOT_FINISHED; 3996 return MBX_NOT_FINISHED;
3996 3997
3998 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
3997 /* setup wake call as IOCB callback */ 3999 /* setup wake call as IOCB callback */
3998 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 4000 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3999 /* setup context field to pass wait_queue pointer to wake function */ 4001 /* setup context field to pass wait_queue pointer to wake function */
@@ -4159,7 +4161,7 @@ lpfc_intr_handler(int irq, void *dev_id)
4159 "pwork:x%x hawork:x%x wait:x%x", 4161 "pwork:x%x hawork:x%x wait:x%x",
4160 phba->work_ha, work_ha_copy, 4162 phba->work_ha, work_ha_copy,
4161 (uint32_t)((unsigned long) 4163 (uint32_t)((unsigned long)
4162 phba->work_wait)); 4164 &phba->work_waitq));
4163 4165
4164 control &= 4166 control &=
4165 ~(HC_R0INT_ENA << LPFC_ELS_RING); 4167 ~(HC_R0INT_ENA << LPFC_ELS_RING);
@@ -4172,7 +4174,7 @@ lpfc_intr_handler(int irq, void *dev_id)
4172 "x%x hawork:x%x wait:x%x", 4174 "x%x hawork:x%x wait:x%x",
4173 phba->work_ha, work_ha_copy, 4175 phba->work_ha, work_ha_copy,
4174 (uint32_t)((unsigned long) 4176 (uint32_t)((unsigned long)
4175 phba->work_wait)); 4177 &phba->work_waitq));
4176 } 4178 }
4177 spin_unlock(&phba->hbalock); 4179 spin_unlock(&phba->hbalock);
4178 } 4180 }
@@ -4297,9 +4299,8 @@ send_current_mbox:
4297 4299
4298 spin_lock(&phba->hbalock); 4300 spin_lock(&phba->hbalock);
4299 phba->work_ha |= work_ha_copy; 4301 phba->work_ha |= work_ha_copy;
4300 if (phba->work_wait)
4301 lpfc_worker_wake_up(phba);
4302 spin_unlock(&phba->hbalock); 4302 spin_unlock(&phba->hbalock);
4303 lpfc_worker_wake_up(phba);
4303 } 4304 }
4304 4305
4305 ha_copy &= ~(phba->work_ha_mask); 4306 ha_copy &= ~(phba->work_ha_mask);
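The recurring simplification in this file is that callers no longer test phba->work_wait under hbalock before waking the worker; lpfc_worker_wake_up() is now called unconditionally, with pending work advertised through an atomic flag. Consistent with the set_bit(LPFC_DATA_READY, ...) and &phba->work_waitq hunks above, the wake-up plausibly reduces to this sketch:

    /* Producer: publish work, then wake the worker thread.
     * set_bit() is atomic, so no spinlock is required here. */
    set_bit(LPFC_DATA_READY, &phba->data_flags);
    wake_up(&phba->work_waitq);

    /* Consumer (worker thread): sleep until work is posted. */
    wait_event_interruptible(phba->work_waitq,
                             test_bit(LPFC_DATA_READY, &phba->data_flags));

This is why the "hbalock should already be held" comments could be deleted along with the conditional.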
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b22b893019f4..ad24cacfbe10 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.2.6" 21#define LPFC_DRIVER_VERSION "8.2.7"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 6feaf59b0b1b..109f89d98830 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -216,6 +216,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
216 int vpi; 216 int vpi;
217 int rc = VPORT_ERROR; 217 int rc = VPORT_ERROR;
218 int status; 218 int status;
219 int size;
219 220
220 if ((phba->sli_rev < 3) || 221 if ((phba->sli_rev < 3) ||
221 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { 222 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
@@ -278,7 +279,20 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
278 279
279 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); 280 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
280 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); 281 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
281 282 size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
283 if (size) {
284 vport->vname = kzalloc(size+1, GFP_KERNEL);
285 if (!vport->vname) {
286 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
287 "1814 Create VPORT failed. "
288 "vname allocation failed.\n");
289 rc = VPORT_ERROR;
290 lpfc_free_vpi(phba, vpi);
291 destroy_port(vport);
292 goto error_out;
293 }
294 memcpy(vport->vname, fc_vport->symbolic_name, size+1);
295 }
282 if (fc_vport->node_name != 0) 296 if (fc_vport->node_name != 0)
283 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); 297 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
284 if (fc_vport->port_name != 0) 298 if (fc_vport->port_name != 0)
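The vport-creation hunk duplicates the fabric symbolic name with an explicit length cap. A sketch of the bounded-copy idiom it uses, with src standing in for fc_vport->symbolic_name; note that because kzalloc() zero-fills, copying only size bytes (rather than the diff's size+1) would also guarantee NUL termination when the name is exactly LPFC_VNAME_LEN bytes long:

    size_t size = strnlen(src, LPFC_VNAME_LEN); /* never scans past the cap */
    char *copy = kzalloc(size + 1, GFP_KERNEL);

    if (!copy)
            return -ENOMEM;
    memcpy(copy, src, size);        /* kzalloc() supplied the terminator */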
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index fd63b06d9ef1..11aa917629ac 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1765,7 +1765,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
1765 default: 1765 default:
1766 return 0; 1766 return 0;
1767 } 1767 }
1768 if (mesg.event == mdev->ofdev.dev.power.power_state.event) 1768 if (ms->phase == sleeping)
1769 return 0; 1769 return 0;
1770 1770
1771 scsi_block_requests(ms->host); 1771 scsi_block_requests(ms->host);
@@ -1780,8 +1780,6 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
1780 disable_irq(ms->meshintr); 1780 disable_irq(ms->meshintr);
1781 set_mesh_power(ms, 0); 1781 set_mesh_power(ms, 0);
1782 1782
1783 mdev->ofdev.dev.power.power_state = mesg;
1784
1785 return 0; 1783 return 0;
1786} 1784}
1787 1785
@@ -1790,7 +1788,7 @@ static int mesh_resume(struct macio_dev *mdev)
1790 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); 1788 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1791 unsigned long flags; 1789 unsigned long flags;
1792 1790
1793 if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON) 1791 if (ms->phase != sleeping)
1794 return 0; 1792 return 0;
1795 1793
1796 set_mesh_power(ms, 1); 1794 set_mesh_power(ms, 1);
@@ -1801,8 +1799,6 @@ static int mesh_resume(struct macio_dev *mdev)
1801 enable_irq(ms->meshintr); 1799 enable_irq(ms->meshintr);
1802 scsi_unblock_requests(ms->host); 1800 scsi_unblock_requests(ms->host);
1803 1801
1804 mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
1805
1806 return 0; 1802 return 0;
1807} 1803}
1808 1804
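The mesh changes stop mirroring power state into mdev->ofdev.dev.power.power_state and instead key both paths off the driver's own phase tracking. A hedged sketch of the resulting guards, assuming (as the resume-side test implies) that the elided suspend body parks the state machine in the driver's existing sleeping phase:

    /* suspend: nothing to do if the state machine already sleeps */
    if (ms->phase == sleeping)
            return 0;
    /* ... quiesce, disable IRQ, power down, ms->phase = sleeping ... */

    /* resume: only wake hardware that was actually put to sleep */
    if (ms->phase != sleeping)
            return 0;

Keeping the authoritative state in the driver avoids the stale power_state comparisons the deleted lines relied on.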
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0c786944d2c2..5822dd595826 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -113,9 +113,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
113 .host_param_mask = ISCSI_HOST_HWADDRESS | 113 .host_param_mask = ISCSI_HOST_HWADDRESS |
114 ISCSI_HOST_IPADDRESS | 114 ISCSI_HOST_IPADDRESS |
115 ISCSI_HOST_INITIATOR_NAME, 115 ISCSI_HOST_INITIATOR_NAME,
116 .sessiondata_size = sizeof(struct ddb_entry),
117 .host_template = &qla4xxx_driver_template,
118
119 .tgt_dscvr = qla4xxx_tgt_dscvr, 116 .tgt_dscvr = qla4xxx_tgt_dscvr,
120 .get_conn_param = qla4xxx_conn_get_param, 117 .get_conn_param = qla4xxx_conn_get_param,
121 .get_session_param = qla4xxx_sess_get_param, 118 .get_session_param = qla4xxx_sess_get_param,
@@ -275,7 +272,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
275 return err; 272 return err;
276 } 273 }
277 274
278 ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0); 275 ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0);
279 if (!ddb_entry->conn) { 276 if (!ddb_entry->conn) {
280 iscsi_remove_session(ddb_entry->sess); 277 iscsi_remove_session(ddb_entry->sess);
281 DEBUG2(printk(KERN_ERR "Could not add connection.\n")); 278 DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
@@ -292,7 +289,8 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
292 struct ddb_entry *ddb_entry; 289 struct ddb_entry *ddb_entry;
293 struct iscsi_cls_session *sess; 290 struct iscsi_cls_session *sess;
294 291
295 sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport); 292 sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport,
293 sizeof(struct ddb_entry));
296 if (!sess) 294 if (!sess)
297 return NULL; 295 return NULL;
298 296
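This qla4xxx hunk tracks the iscsi class API change detailed further down in scsi_transport_iscsi.c: per-session private data is now sized at allocation time instead of through the transport template's removed sessiondata_size field. A sketch of the calling convention from an LLD's point of view (struct my_sess is a hypothetical driver type):

    struct my_sess { int flags; };          /* hypothetical LLD state */
    struct iscsi_cls_session *sess;
    struct my_sess *priv;

    sess = iscsi_alloc_session(shost, &my_transport, sizeof(struct my_sess));
    if (!sess)
            return NULL;
    priv = sess->dd_data;   /* private area placed just past the session */

Different session types on one transport can now carry different amounts of private data, which a single template-wide size could not express.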
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 110e776d1a07..36c92f961e15 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -855,9 +855,18 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
855 855
856 good_bytes = scsi_bufflen(cmd); 856 good_bytes = scsi_bufflen(cmd);
857 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { 857 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
858 int old_good_bytes = good_bytes;
858 drv = scsi_cmd_to_driver(cmd); 859 drv = scsi_cmd_to_driver(cmd);
859 if (drv->done) 860 if (drv->done)
860 good_bytes = drv->done(cmd); 861 good_bytes = drv->done(cmd);
862 /*
863 * USB may not give sense identifying bad sector and
864 * simply return a residue instead, so subtract off the
865 * residue if drv->done() error processing indicates no
866 * change to the completion length.
867 */
868 if (good_bytes == old_good_bytes)
869 good_bytes -= scsi_get_resid(cmd);
861 } 870 }
862 scsi_io_completion(cmd, good_bytes); 871 scsi_io_completion(cmd, good_bytes);
863} 872}
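A short worked example of the new residue handling: a 4096-byte READ through a USB bridge that reports a 512-byte residue but returns no sense data. drv->done() leaves good_bytes at the full buffer length, so the residue is subtracted:

    good_bytes = scsi_bufflen(cmd);         /* 4096 */
    old_good_bytes = good_bytes;
    if (drv->done)
            good_bytes = drv->done(cmd);    /* still 4096: no sense info */
    if (good_bytes == old_good_bytes)
            good_bytes -= scsi_get_resid(cmd);      /* 4096 - 512 = 3584 */

Had drv->done() adjusted the length itself (say, from MEDIUM ERROR sense pointing at a bad sector), the residue would be ignored in favor of that more precise figure.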
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f6600bfb5bde..01d11a01ffbf 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -94,6 +94,7 @@ static const char * scsi_debug_version_date = "20070104";
94#define DEF_VIRTUAL_GB 0 94#define DEF_VIRTUAL_GB 0
95#define DEF_FAKE_RW 0 95#define DEF_FAKE_RW 0
96#define DEF_VPD_USE_HOSTNO 1 96#define DEF_VPD_USE_HOSTNO 1
97#define DEF_SECTOR_SIZE 512
97 98
98/* bit mask values for scsi_debug_opts */ 99/* bit mask values for scsi_debug_opts */
99#define SCSI_DEBUG_OPT_NOISE 1 100#define SCSI_DEBUG_OPT_NOISE 1
@@ -142,6 +143,7 @@ static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
142static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; 143static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
143static int scsi_debug_fake_rw = DEF_FAKE_RW; 144static int scsi_debug_fake_rw = DEF_FAKE_RW;
144static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; 145static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
146static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
145 147
146static int scsi_debug_cmnd_count = 0; 148static int scsi_debug_cmnd_count = 0;
147 149
@@ -157,11 +159,6 @@ static int sdebug_heads; /* heads per disk */
157static int sdebug_cylinders_per; /* cylinders per surface */ 159static int sdebug_cylinders_per; /* cylinders per surface */
158static int sdebug_sectors_per; /* sectors per cylinder */ 160static int sdebug_sectors_per; /* sectors per cylinder */
159 161
160/* default sector size is 512 bytes, 2**9 bytes */
161#define POW2_SECT_SIZE 9
162#define SECT_SIZE (1 << POW2_SECT_SIZE)
163#define SECT_SIZE_PER(TGT) SECT_SIZE
164
165#define SDEBUG_MAX_PARTS 4 162#define SDEBUG_MAX_PARTS 4
166 163
167#define SDEBUG_SENSE_LEN 32 164#define SDEBUG_SENSE_LEN 32
@@ -646,6 +643,14 @@ static int inquiry_evpd_b0(unsigned char * arr)
646 return sizeof(vpdb0_data); 643 return sizeof(vpdb0_data);
647} 644}
648 645
646static int inquiry_evpd_b1(unsigned char *arr)
647{
648 memset(arr, 0, 0x3c);
649 arr[0] = 0;
650 arr[1] = 1;
651
652 return 0x3c;
653}
649 654
650#define SDEBUG_LONG_INQ_SZ 96 655#define SDEBUG_LONG_INQ_SZ 96
651#define SDEBUG_MAX_INQ_ARR_SZ 584 656#define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -701,6 +706,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
701 arr[n++] = 0x88; /* SCSI ports */ 706 arr[n++] = 0x88; /* SCSI ports */
702 arr[n++] = 0x89; /* ATA information */ 707 arr[n++] = 0x89; /* ATA information */
703 arr[n++] = 0xb0; /* Block limits (SBC) */ 708 arr[n++] = 0xb0; /* Block limits (SBC) */
709 arr[n++] = 0xb1; /* Block characteristics (SBC) */
704 arr[3] = n - 4; /* number of supported VPD pages */ 710 arr[3] = n - 4; /* number of supported VPD pages */
705 } else if (0x80 == cmd[2]) { /* unit serial number */ 711 } else if (0x80 == cmd[2]) { /* unit serial number */
706 arr[1] = cmd[2]; /*sanity */ 712 arr[1] = cmd[2]; /*sanity */
@@ -740,6 +746,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
740 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */ 746 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
741 arr[1] = cmd[2]; /*sanity */ 747 arr[1] = cmd[2]; /*sanity */
742 arr[3] = inquiry_evpd_b0(&arr[4]); 748 arr[3] = inquiry_evpd_b0(&arr[4]);
749 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
750 arr[1] = cmd[2]; /*sanity */
751 arr[3] = inquiry_evpd_b1(&arr[4]);
743 } else { 752 } else {
744 /* Illegal request, invalid field in cdb */ 753 /* Illegal request, invalid field in cdb */
745 mk_sense_buffer(devip, ILLEGAL_REQUEST, 754 mk_sense_buffer(devip, ILLEGAL_REQUEST,
@@ -878,8 +887,8 @@ static int resp_readcap(struct scsi_cmnd * scp,
878 arr[2] = 0xff; 887 arr[2] = 0xff;
879 arr[3] = 0xff; 888 arr[3] = 0xff;
880 } 889 }
881 arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff; 890 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
882 arr[7] = SECT_SIZE_PER(target) & 0xff; 891 arr[7] = scsi_debug_sector_size & 0xff;
883 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); 892 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
884} 893}
885 894
@@ -902,10 +911,10 @@ static int resp_readcap16(struct scsi_cmnd * scp,
902 capac = sdebug_capacity - 1; 911 capac = sdebug_capacity - 1;
903 for (k = 0; k < 8; ++k, capac >>= 8) 912 for (k = 0; k < 8; ++k, capac >>= 8)
904 arr[7 - k] = capac & 0xff; 913 arr[7 - k] = capac & 0xff;
905 arr[8] = (SECT_SIZE_PER(target) >> 24) & 0xff; 914 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
906 arr[9] = (SECT_SIZE_PER(target) >> 16) & 0xff; 915 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
907 arr[10] = (SECT_SIZE_PER(target) >> 8) & 0xff; 916 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
908 arr[11] = SECT_SIZE_PER(target) & 0xff; 917 arr[11] = scsi_debug_sector_size & 0xff;
909 return fill_from_dev_buffer(scp, arr, 918 return fill_from_dev_buffer(scp, arr,
910 min(alloc_len, SDEBUG_READCAP16_ARR_SZ)); 919 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
911} 920}
@@ -1019,20 +1028,20 @@ static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1019 1028
1020static int resp_format_pg(unsigned char * p, int pcontrol, int target) 1029static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1021{ /* Format device page for mode_sense */ 1030{ /* Format device page for mode_sense */
1022 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0, 1031 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1023 0, 0, 0, 0, 0, 0, 0, 0, 1032 0, 0, 0, 0, 0, 0, 0, 0,
1024 0, 0, 0, 0, 0x40, 0, 0, 0}; 1033 0, 0, 0, 0, 0x40, 0, 0, 0};
1025 1034
1026 memcpy(p, format_pg, sizeof(format_pg)); 1035 memcpy(p, format_pg, sizeof(format_pg));
1027 p[10] = (sdebug_sectors_per >> 8) & 0xff; 1036 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1028 p[11] = sdebug_sectors_per & 0xff; 1037 p[11] = sdebug_sectors_per & 0xff;
1029 p[12] = (SECT_SIZE >> 8) & 0xff; 1038 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1030 p[13] = SECT_SIZE & 0xff; 1039 p[13] = scsi_debug_sector_size & 0xff;
1031 if (DEV_REMOVEABLE(target)) 1040 if (DEV_REMOVEABLE(target))
1032 p[20] |= 0x20; /* should agree with INQUIRY */ 1041 p[20] |= 0x20; /* should agree with INQUIRY */
1033 if (1 == pcontrol) 1042 if (1 == pcontrol)
1034 memset(p + 2, 0, sizeof(format_pg) - 2); 1043 memset(p + 2, 0, sizeof(format_pg) - 2);
1035 return sizeof(format_pg); 1044 return sizeof(format_pg);
1036} 1045}
1037 1046
1038static int resp_caching_pg(unsigned char * p, int pcontrol, int target) 1047static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
@@ -1206,8 +1215,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1206 ap[2] = (sdebug_capacity >> 8) & 0xff; 1215 ap[2] = (sdebug_capacity >> 8) & 0xff;
1207 ap[3] = sdebug_capacity & 0xff; 1216 ap[3] = sdebug_capacity & 0xff;
1208 } 1217 }
1209 ap[6] = (SECT_SIZE_PER(target) >> 8) & 0xff; 1218 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1210 ap[7] = SECT_SIZE_PER(target) & 0xff; 1219 ap[7] = scsi_debug_sector_size & 0xff;
1211 offset += bd_len; 1220 offset += bd_len;
1212 ap = arr + offset; 1221 ap = arr + offset;
1213 } else if (16 == bd_len) { 1222 } else if (16 == bd_len) {
@@ -1215,10 +1224,10 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1215 1224
1216 for (k = 0; k < 8; ++k, capac >>= 8) 1225 for (k = 0; k < 8; ++k, capac >>= 8)
1217 ap[7 - k] = capac & 0xff; 1226 ap[7 - k] = capac & 0xff;
1218 ap[12] = (SECT_SIZE_PER(target) >> 24) & 0xff; 1227 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1219 ap[13] = (SECT_SIZE_PER(target) >> 16) & 0xff; 1228 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1220 ap[14] = (SECT_SIZE_PER(target) >> 8) & 0xff; 1229 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1221 ap[15] = SECT_SIZE_PER(target) & 0xff; 1230 ap[15] = scsi_debug_sector_size & 0xff;
1222 offset += bd_len; 1231 offset += bd_len;
1223 ap = arr + offset; 1232 ap = arr + offset;
1224 } 1233 }
@@ -1519,10 +1528,10 @@ static int do_device_access(struct scsi_cmnd *scmd,
1519 if (block + num > sdebug_store_sectors) 1528 if (block + num > sdebug_store_sectors)
1520 rest = block + num - sdebug_store_sectors; 1529 rest = block + num - sdebug_store_sectors;
1521 1530
1522 ret = func(scmd, fake_storep + (block * SECT_SIZE), 1531 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1523 (num - rest) * SECT_SIZE); 1532 (num - rest) * scsi_debug_sector_size);
1524 if (!ret && rest) 1533 if (!ret && rest)
1525 ret = func(scmd, fake_storep, rest * SECT_SIZE); 1534 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1526 1535
1527 return ret; 1536 return ret;
1528} 1537}
@@ -1575,10 +1584,10 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
1575 write_unlock_irqrestore(&atomic_rw, iflags); 1584 write_unlock_irqrestore(&atomic_rw, iflags);
1576 if (-1 == ret) 1585 if (-1 == ret)
1577 return (DID_ERROR << 16); 1586 return (DID_ERROR << 16);
1578 else if ((ret < (num * SECT_SIZE)) && 1587 else if ((ret < (num * scsi_debug_sector_size)) &&
1579 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 1588 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1580 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, " 1589 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
1581 " IO sent=%d bytes\n", num * SECT_SIZE, ret); 1590 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
1582 return 0; 1591 return 0;
1583} 1592}
1584 1593
@@ -2085,6 +2094,7 @@ module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2085module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); 2094module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2086module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, 2095module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2087 S_IRUGO | S_IWUSR); 2096 S_IRUGO | S_IWUSR);
2097module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2088 2098
2089MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2099MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2090MODULE_DESCRIPTION("SCSI debug adapter driver"); 2100MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2106,6 +2116,7 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2106MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2116MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2107MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); 2117MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2108MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); 2118MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2119MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)");
2109 2120
2110 2121
2111static char sdebug_info[256]; 2122static char sdebug_info[256];
@@ -2158,8 +2169,9 @@ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **sta
2158 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth, 2169 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2159 scsi_debug_cmnd_count, scsi_debug_delay, 2170 scsi_debug_cmnd_count, scsi_debug_delay,
2160 scsi_debug_max_luns, scsi_debug_scsi_level, 2171 scsi_debug_max_luns, scsi_debug_scsi_level,
2161 SECT_SIZE, sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per, 2172 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2162 num_aborts, num_dev_resets, num_bus_resets, num_host_resets); 2173 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2174 num_host_resets);
2163 if (pos < offset) { 2175 if (pos < offset) {
2164 len = 0; 2176 len = 0;
2165 begin = pos; 2177 begin = pos;
@@ -2434,6 +2446,12 @@ static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
2434DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show, 2446DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
2435 sdebug_vpd_use_hostno_store); 2447 sdebug_vpd_use_hostno_store);
2436 2448
2449static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
2450{
2451 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
2452}
2453DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
2454
2437/* Note: The following function creates attribute files in the 2455/* Note: The following function creates attribute files in the
2438 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these 2456 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
2439 files (over those found in the /sys/module/scsi_debug/parameters 2457 files (over those found in the /sys/module/scsi_debug/parameters
@@ -2459,11 +2477,13 @@ static int do_create_driverfs_files(void)
2459 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2477 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
2460 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); 2478 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
2461 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); 2479 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2480 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
2462 return ret; 2481 return ret;
2463} 2482}
2464 2483
2465static void do_remove_driverfs_files(void) 2484static void do_remove_driverfs_files(void)
2466{ 2485{
2486 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
2467 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); 2487 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2468 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); 2488 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
2469 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2489 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
@@ -2499,10 +2519,22 @@ static int __init scsi_debug_init(void)
2499 int k; 2519 int k;
2500 int ret; 2520 int ret;
2501 2521
2522 switch (scsi_debug_sector_size) {
2523 case 512:
2524 case 1024:
2525 case 2048:
2526 case 4096:
2527 break;
2528 default:
2529 printk(KERN_ERR "scsi_debug_init: invalid sector_size %u\n",
2530 scsi_debug_sector_size);
2531 return -EINVAL;
2532 }
2533
2502 if (scsi_debug_dev_size_mb < 1) 2534 if (scsi_debug_dev_size_mb < 1)
2503 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 2535 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
2504 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; 2536 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
2505 sdebug_store_sectors = sz / SECT_SIZE; 2537 sdebug_store_sectors = sz / scsi_debug_sector_size;
2506 sdebug_capacity = get_sdebug_capacity(); 2538 sdebug_capacity = get_sdebug_capacity();
2507 2539
2508 /* play around with geometry, don't waste too much on track 0 */ 2540 /* play around with geometry, don't waste too much on track 0 */
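With the compile-time SECT_SIZE gone, the emulated sector size becomes a read-only module parameter validated at init to 512, 1024, 2048 or 4096 (e.g. loading with sector_size=4096) and exposed through the new sector_size driver attribute. The store sizing is plain arithmetic from the hunk above:

    /* a 1 MB ramdisk with 4 KiB sectors yields 256 logical blocks */
    sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;  /* 1048576 */
    sdebug_store_sectors = sz / scsi_debug_sector_size;    /* 256 */

READ CAPACITY, the mode-page block descriptors and the format page all report the same scsi_debug_sector_size, so initiators see a consistent large-sector disk.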
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index eaf5a8add1ba..006a95916f72 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -298,6 +298,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
298 */ 298 */
299static int scsi_check_sense(struct scsi_cmnd *scmd) 299static int scsi_check_sense(struct scsi_cmnd *scmd)
300{ 300{
301 struct scsi_device *sdev = scmd->device;
301 struct scsi_sense_hdr sshdr; 302 struct scsi_sense_hdr sshdr;
302 303
303 if (! scsi_command_normalize_sense(scmd, &sshdr)) 304 if (! scsi_command_normalize_sense(scmd, &sshdr))
@@ -306,6 +307,16 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
306 if (scsi_sense_is_deferred(&sshdr)) 307 if (scsi_sense_is_deferred(&sshdr))
307 return NEEDS_RETRY; 308 return NEEDS_RETRY;
308 309
310 if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
311 sdev->scsi_dh_data->scsi_dh->check_sense) {
312 int rc;
313
314 rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
315 if (rc != SCSI_RETURN_NOT_HANDLED)
316 return rc;
317 /* handler does not care. Drop down to default handling */
318 }
319
309 /* 320 /*
310 * Previous logic looked for FILEMARK, EOM or ILI which are 321 * Previous logic looked for FILEMARK, EOM or ILI which are
311 * mainly associated with tapes and returned SUCCESS. 322 * mainly associated with tapes and returned SUCCESS.
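This hook gives a registered device handler first refusal on sense data before the midlayer's generic interpretation; returning SCSI_RETURN_NOT_HANDLED falls through to the existing logic. A hedged sketch of a handler callback (my_dh_check_sense is hypothetical; the return-code contract is the one visible above):

    static int my_dh_check_sense(struct scsi_device *sdev,
                                 struct scsi_sense_hdr *sshdr)
    {
            /* e.g. treat an array-specific NOT READY (ASC 0x04)
             * as retryable instead of failing the command */
            if (sshdr->sense_key == NOT_READY && sshdr->asc == 0x04)
                    return NEEDS_RETRY;
            return SCSI_RETURN_NOT_HANDLED; /* defer to default handling */
    }

Multipath hardware handlers use this to recognize controller-failover sense codes that the generic error handler would otherwise escalate.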
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index cbf55d59a54c..88d1b5f44e59 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
65}; 65};
66#undef SP 66#undef SP
67 67
68static struct kmem_cache *scsi_bidi_sdb_cache; 68static struct kmem_cache *scsi_sdb_cache;
69 69
70static void scsi_run_queue(struct request_queue *q); 70static void scsi_run_queue(struct request_queue *q);
71 71
@@ -784,7 +784,7 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
784 struct scsi_data_buffer *bidi_sdb = 784 struct scsi_data_buffer *bidi_sdb =
785 cmd->request->next_rq->special; 785 cmd->request->next_rq->special;
786 scsi_free_sgtable(bidi_sdb); 786 scsi_free_sgtable(bidi_sdb);
787 kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb); 787 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
788 cmd->request->next_rq->special = NULL; 788 cmd->request->next_rq->special = NULL;
789 } 789 }
790} 790}
@@ -1059,7 +1059,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1059 1059
1060 if (blk_bidi_rq(cmd->request)) { 1060 if (blk_bidi_rq(cmd->request)) {
1061 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( 1061 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
1062 scsi_bidi_sdb_cache, GFP_ATOMIC); 1062 scsi_sdb_cache, GFP_ATOMIC);
1063 if (!bidi_sdb) { 1063 if (!bidi_sdb) {
1064 error = BLKPREP_DEFER; 1064 error = BLKPREP_DEFER;
1065 goto err_exit; 1065 goto err_exit;
@@ -1169,6 +1169,14 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1169 1169
1170 if (ret != BLKPREP_OK) 1170 if (ret != BLKPREP_OK)
1171 return ret; 1171 return ret;
1172
1173 if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1174 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1175 ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1176 if (ret != BLKPREP_OK)
1177 return ret;
1178 }
1179
1172 /* 1180 /*
1173 * Filesystem requests must transfer data. 1181 * Filesystem requests must transfer data.
1174 */ 1182 */
@@ -1329,7 +1337,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1329 printk("scsi%d unblocking host at zero depth\n", 1337 printk("scsi%d unblocking host at zero depth\n",
1330 shost->host_no)); 1338 shost->host_no));
1331 } else { 1339 } else {
1332 blk_plug_device(q);
1333 return 0; 1340 return 0;
1334 } 1341 }
1335 } 1342 }
@@ -1693,11 +1700,11 @@ int __init scsi_init_queue(void)
1693 return -ENOMEM; 1700 return -ENOMEM;
1694 } 1701 }
1695 1702
1696 scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb", 1703 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1697 sizeof(struct scsi_data_buffer), 1704 sizeof(struct scsi_data_buffer),
1698 0, 0, NULL); 1705 0, 0, NULL);
1699 if (!scsi_bidi_sdb_cache) { 1706 if (!scsi_sdb_cache) {
1700 printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n"); 1707 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1701 goto cleanup_io_context; 1708 goto cleanup_io_context;
1702 } 1709 }
1703 1710
@@ -1710,7 +1717,7 @@ int __init scsi_init_queue(void)
1710 if (!sgp->slab) { 1717 if (!sgp->slab) {
1711 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 1718 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1712 sgp->name); 1719 sgp->name);
1713 goto cleanup_bidi_sdb; 1720 goto cleanup_sdb;
1714 } 1721 }
1715 1722
1716 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 1723 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
@@ -1718,13 +1725,13 @@ int __init scsi_init_queue(void)
1718 if (!sgp->pool) { 1725 if (!sgp->pool) {
1719 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 1726 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1720 sgp->name); 1727 sgp->name);
1721 goto cleanup_bidi_sdb; 1728 goto cleanup_sdb;
1722 } 1729 }
1723 } 1730 }
1724 1731
1725 return 0; 1732 return 0;
1726 1733
1727cleanup_bidi_sdb: 1734cleanup_sdb:
1728 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1735 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1729 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1736 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1730 if (sgp->pool) 1737 if (sgp->pool)
@@ -1732,7 +1739,7 @@ cleanup_bidi_sdb:
1732 if (sgp->slab) 1739 if (sgp->slab)
1733 kmem_cache_destroy(sgp->slab); 1740 kmem_cache_destroy(sgp->slab);
1734 } 1741 }
1735 kmem_cache_destroy(scsi_bidi_sdb_cache); 1742 kmem_cache_destroy(scsi_sdb_cache);
1736cleanup_io_context: 1743cleanup_io_context:
1737 kmem_cache_destroy(scsi_io_context_cache); 1744 kmem_cache_destroy(scsi_io_context_cache);
1738 1745
@@ -1744,7 +1751,7 @@ void scsi_exit_queue(void)
1744 int i; 1751 int i;
1745 1752
1746 kmem_cache_destroy(scsi_io_context_cache); 1753 kmem_cache_destroy(scsi_io_context_cache);
1747 kmem_cache_destroy(scsi_bidi_sdb_cache); 1754 kmem_cache_destroy(scsi_sdb_cache);
1748 1755
1749 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1756 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1750 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1757 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
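Besides the cache rename, the scsi_setup_fs_cmnd() hunk above gives device handlers a veto over filesystem requests before command construction. A sketch of a prep_fn in the style the hook expects (my_dh_prep_fn and my_dh_state() are hypothetical):

    static int my_dh_prep_fn(struct scsi_device *sdev, struct request *req)
    {
            /* my_dh_state() stands for the handler's per-device state */
            if (my_dh_state(sdev) != MY_DH_ACTIVE) {
                    req->cmd_flags |= REQ_QUIET;    /* no error spew */
                    return BLKPREP_KILL;            /* fail, don't retry */
            }
            return BLKPREP_OK;
    }

Returning BLKPREP_DEFER instead would requeue the request, which suits transient states such as an in-progress path failover.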
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index a00eee6f7be9..196fe3af0d5e 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -346,7 +346,7 @@ static void scsi_target_dev_release(struct device *dev)
346 put_device(parent); 346 put_device(parent);
347} 347}
348 348
349struct device_type scsi_target_type = { 349static struct device_type scsi_target_type = {
350 .name = "scsi_target", 350 .name = "scsi_target",
351 .release = scsi_target_dev_release, 351 .release = scsi_target_dev_release,
352}; 352};
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 93d2b6714453..b6e561059779 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -439,6 +439,7 @@ struct bus_type scsi_bus_type = {
439 .resume = scsi_bus_resume, 439 .resume = scsi_bus_resume,
440 .remove = scsi_bus_remove, 440 .remove = scsi_bus_remove,
441}; 441};
442EXPORT_SYMBOL_GPL(scsi_bus_type);
442 443
443int scsi_sysfs_register(void) 444int scsi_sysfs_register(void)
444{ 445{
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 65d1737eb664..3af7cbcc5c5d 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,10 +30,11 @@
30#include <scsi/scsi_transport_iscsi.h> 30#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/iscsi_if.h> 31#include <scsi/iscsi_if.h>
32 32
33#define ISCSI_SESSION_ATTRS 19 33#define ISCSI_SESSION_ATTRS 21
34#define ISCSI_CONN_ATTRS 13 34#define ISCSI_CONN_ATTRS 13
35#define ISCSI_HOST_ATTRS 4 35#define ISCSI_HOST_ATTRS 4
36#define ISCSI_TRANSPORT_VERSION "2.0-869" 36
37#define ISCSI_TRANSPORT_VERSION "2.0-870"
37 38
38struct iscsi_internal { 39struct iscsi_internal {
39 int daemon_pid; 40 int daemon_pid;
@@ -101,16 +102,10 @@ show_transport_##name(struct device *dev, \
101static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); 102static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
102 103
103show_transport_attr(caps, "0x%x"); 104show_transport_attr(caps, "0x%x");
104show_transport_attr(max_lun, "%d");
105show_transport_attr(max_conn, "%d");
106show_transport_attr(max_cmd_len, "%d");
107 105
108static struct attribute *iscsi_transport_attrs[] = { 106static struct attribute *iscsi_transport_attrs[] = {
109 &dev_attr_handle.attr, 107 &dev_attr_handle.attr,
110 &dev_attr_caps.attr, 108 &dev_attr_caps.attr,
111 &dev_attr_max_lun.attr,
112 &dev_attr_max_conn.attr,
113 &dev_attr_max_cmd_len.attr,
114 NULL, 109 NULL,
115}; 110};
116 111
@@ -118,18 +113,139 @@ static struct attribute_group iscsi_transport_group = {
118 .attrs = iscsi_transport_attrs, 113 .attrs = iscsi_transport_attrs,
119}; 114};
120 115
116/*
117 * iSCSI endpoint attrs
118 */
119#define iscsi_dev_to_endpoint(_dev) \
120 container_of(_dev, struct iscsi_endpoint, dev)
121
122#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
123struct device_attribute dev_attr_##_prefix##_##_name = \
124 __ATTR(_name,_mode,_show,_store)
125
126static void iscsi_endpoint_release(struct device *dev)
127{
128 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
129 kfree(ep);
130}
131
132static struct class iscsi_endpoint_class = {
133 .name = "iscsi_endpoint",
134 .dev_release = iscsi_endpoint_release,
135};
136
137static ssize_t
138show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
139{
140 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
141 return sprintf(buf, "%u\n", ep->id);
142}
143static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
144
145static struct attribute *iscsi_endpoint_attrs[] = {
146 &dev_attr_ep_handle.attr,
147 NULL,
148};
149
150static struct attribute_group iscsi_endpoint_group = {
151 .attrs = iscsi_endpoint_attrs,
152};
121 153
154#define ISCSI_MAX_EPID -1
155
156static int iscsi_match_epid(struct device *dev, void *data)
157{
158 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
159 unsigned int *epid = (unsigned int *) data;
160
161 return *epid == ep->id;
162}
163
164struct iscsi_endpoint *
165iscsi_create_endpoint(int dd_size)
166{
167 struct device *dev;
168 struct iscsi_endpoint *ep;
169 unsigned int id;
170 int err;
171
172 for (id = 1; id < ISCSI_MAX_EPID; id++) {
173 dev = class_find_device(&iscsi_endpoint_class, &id,
174 iscsi_match_epid);
175 if (!dev)
176 break;
177 }
178 if (id == ISCSI_MAX_EPID) {
179 printk(KERN_ERR "Too many connections. Max supported %u\n",
180 ISCSI_MAX_EPID - 1);
181 return NULL;
182 }
183
184 ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
185 if (!ep)
186 return NULL;
187
188 ep->id = id;
189 ep->dev.class = &iscsi_endpoint_class;
190 snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
191 err = device_register(&ep->dev);
192 if (err)
193 goto free_ep;
194
195 err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
196 if (err)
197 goto unregister_dev;
198
199 if (dd_size)
200 ep->dd_data = &ep[1];
201 return ep;
202
203unregister_dev:
204 device_unregister(&ep->dev);
205 return NULL;
206
207free_ep:
208 kfree(ep);
209 return NULL;
210}
211EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
212
213void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
214{
215 sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
216 device_unregister(&ep->dev);
217}
218EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
219
220struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
221{
222 struct iscsi_endpoint *ep;
223 struct device *dev;
224
225 dev = class_find_device(&iscsi_endpoint_class, &handle,
226 iscsi_match_epid);
227 if (!dev)
228 return NULL;
229
230 ep = iscsi_dev_to_endpoint(dev);
231 /*
232 * we can drop this now because the interface will prevent
233 * removals and lookups from racing.
234 */
235 put_device(dev);
236 return ep;
237}
238EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
122 239
123static int iscsi_setup_host(struct transport_container *tc, struct device *dev, 240static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
124 struct device *cdev) 241 struct device *cdev)
125{ 242{
126 struct Scsi_Host *shost = dev_to_shost(dev); 243 struct Scsi_Host *shost = dev_to_shost(dev);
127 struct iscsi_host *ihost = shost->shost_data; 244 struct iscsi_cls_host *ihost = shost->shost_data;
128 245
129 memset(ihost, 0, sizeof(*ihost)); 246 memset(ihost, 0, sizeof(*ihost));
130 INIT_LIST_HEAD(&ihost->sessions);
131 mutex_init(&ihost->mutex);
132 atomic_set(&ihost->nr_scans, 0); 247 atomic_set(&ihost->nr_scans, 0);
248 mutex_init(&ihost->mutex);
133 249
134 snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d", 250 snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
135 shost->host_no); 251 shost->host_no);
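The new endpoint object gives offload transports a sysfs-visible, id-addressable handle per connection endpoint, with driver-private data carved out after the structure the same way sessions do it. A sketch of the intended LLD usage (struct my_ep is hypothetical):

    struct my_ep { u32 cid; };      /* hypothetical per-endpoint state */
    struct iscsi_endpoint *ep;
    struct my_ep *my;

    ep = iscsi_create_endpoint(sizeof(struct my_ep));
    if (!ep)
            return ERR_PTR(-ENOMEM);
    my = ep->dd_data;       /* private area just past the endpoint */
    /* ... establish the offloaded connection, record it in *my ... */

    iscsi_destroy_endpoint(ep);     /* teardown path */

Note the error handling in iscsi_create_endpoint() above: once device_register() has succeeded, device_unregister() drops the final reference and iscsi_endpoint_release() frees the allocation, so only the pre-registration failure takes the explicit kfree() path.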
@@ -144,7 +260,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
144 struct device *cdev) 260 struct device *cdev)
145{ 261{
146 struct Scsi_Host *shost = dev_to_shost(dev); 262 struct Scsi_Host *shost = dev_to_shost(dev);
147 struct iscsi_host *ihost = shost->shost_data; 263 struct iscsi_cls_host *ihost = shost->shost_data;
148 264
149 destroy_workqueue(ihost->scan_workq); 265 destroy_workqueue(ihost->scan_workq);
150 return 0; 266 return 0;
@@ -287,6 +403,24 @@ static int iscsi_is_session_dev(const struct device *dev)
287 return dev->release == iscsi_session_release; 403 return dev->release == iscsi_session_release;
288} 404}
289 405
406static int iscsi_iter_session_fn(struct device *dev, void *data)
407{
408 void (* fn) (struct iscsi_cls_session *) = data;
409
410 if (!iscsi_is_session_dev(dev))
411 return 0;
412 fn(iscsi_dev_to_session(dev));
413 return 0;
414}
415
416void iscsi_host_for_each_session(struct Scsi_Host *shost,
417 void (*fn)(struct iscsi_cls_session *))
418{
419 device_for_each_child(&shost->shost_gendev, fn,
420 iscsi_iter_session_fn);
421}
422EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
423
290/** 424/**
291 * iscsi_scan_finished - helper to report when running scans are done 425 * iscsi_scan_finished - helper to report when running scans are done
292 * @shost: scsi host 426 * @shost: scsi host
@@ -297,7 +431,7 @@ static int iscsi_is_session_dev(const struct device *dev)
297 */ 431 */
298int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time) 432int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
299{ 433{
300 struct iscsi_host *ihost = shost->shost_data; 434 struct iscsi_cls_host *ihost = shost->shost_data;
301 /* 435 /*
302 * qla4xxx will have kicked off some session unblocks before calling 436 * qla4xxx will have kicked off some session unblocks before calling
303 * scsi_scan_host, so just wait for them to complete. 437 * scsi_scan_host, so just wait for them to complete.
@@ -306,42 +440,76 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
306} 440}
307EXPORT_SYMBOL_GPL(iscsi_scan_finished); 441EXPORT_SYMBOL_GPL(iscsi_scan_finished);
308 442
309static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, 443struct iscsi_scan_data {
310 uint id, uint lun) 444 unsigned int channel;
445 unsigned int id;
446 unsigned int lun;
447};
448
449static int iscsi_user_scan_session(struct device *dev, void *data)
311{ 450{
312 struct iscsi_host *ihost = shost->shost_data; 451 struct iscsi_scan_data *scan_data = data;
313 struct iscsi_cls_session *session; 452 struct iscsi_cls_session *session;
453 struct Scsi_Host *shost;
454 struct iscsi_cls_host *ihost;
455 unsigned long flags;
456 unsigned int id;
457
458 if (!iscsi_is_session_dev(dev))
459 return 0;
460
461 session = iscsi_dev_to_session(dev);
462 shost = iscsi_session_to_shost(session);
463 ihost = shost->shost_data;
314 464
315 mutex_lock(&ihost->mutex); 465 mutex_lock(&ihost->mutex);
316 list_for_each_entry(session, &ihost->sessions, host_list) { 466 spin_lock_irqsave(&session->lock, flags);
317 if ((channel == SCAN_WILD_CARD || channel == 0) && 467 if (session->state != ISCSI_SESSION_LOGGED_IN) {
318 (id == SCAN_WILD_CARD || id == session->target_id)) 468 spin_unlock_irqrestore(&session->lock, flags);
319 scsi_scan_target(&session->dev, 0, 469 mutex_unlock(&ihost->mutex);
320 session->target_id, lun, 1); 470 return 0;
321 } 471 }
322 mutex_unlock(&ihost->mutex); 472 id = session->target_id;
473 spin_unlock_irqrestore(&session->lock, flags);
323 474
475 if (id != ISCSI_MAX_TARGET) {
476 if ((scan_data->channel == SCAN_WILD_CARD ||
477 scan_data->channel == 0) &&
478 (scan_data->id == SCAN_WILD_CARD ||
479 scan_data->id == id))
480 scsi_scan_target(&session->dev, 0, id,
481 scan_data->lun, 1);
482 }
483 mutex_unlock(&ihost->mutex);
324 return 0; 484 return 0;
325} 485}
326 486
487static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
488 uint id, uint lun)
489{
490 struct iscsi_scan_data scan_data;
491
492 scan_data.channel = channel;
493 scan_data.id = id;
494 scan_data.lun = lun;
495
496 return device_for_each_child(&shost->shost_gendev, &scan_data,
497 iscsi_user_scan_session);
498}
499
327static void iscsi_scan_session(struct work_struct *work) 500static void iscsi_scan_session(struct work_struct *work)
328{ 501{
329 struct iscsi_cls_session *session = 502 struct iscsi_cls_session *session =
330 container_of(work, struct iscsi_cls_session, scan_work); 503 container_of(work, struct iscsi_cls_session, scan_work);
331 struct Scsi_Host *shost = iscsi_session_to_shost(session); 504 struct Scsi_Host *shost = iscsi_session_to_shost(session);
332 struct iscsi_host *ihost = shost->shost_data; 505 struct iscsi_cls_host *ihost = shost->shost_data;
333 unsigned long flags; 506 struct iscsi_scan_data scan_data;
334 507
335 spin_lock_irqsave(&session->lock, flags); 508 scan_data.channel = 0;
336 if (session->state != ISCSI_SESSION_LOGGED_IN) { 509 scan_data.id = SCAN_WILD_CARD;
337 spin_unlock_irqrestore(&session->lock, flags); 510 scan_data.lun = SCAN_WILD_CARD;
338 goto done;
339 }
340 spin_unlock_irqrestore(&session->lock, flags);
341 511
342 scsi_scan_target(&session->dev, 0, session->target_id, 512 iscsi_user_scan_session(&session->dev, &scan_data);
343 SCAN_WILD_CARD, 1);
344done:
345 atomic_dec(&ihost->nr_scans); 513 atomic_dec(&ihost->nr_scans);
346} 514}
347 515
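With the per-host session list removed, every former walker of ihost->sessions now iterates the host's child devices, filtered by iscsi_is_session_dev(). The exported helper makes that available to LLDs as well; a sketch of using it to quiesce all sessions when an adapter resets (my_quiesce_session is hypothetical, iscsi_block_session is the class's existing primitive):

    static void my_quiesce_session(struct iscsi_cls_session *session)
    {
            iscsi_block_session(session);   /* stop I/O to this session */
    }

    /* adapter-reset path: */
    iscsi_host_for_each_session(shost, my_quiesce_session);

Driving iteration through the device tree removes the locking between the host list and session teardown that the old code needed.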
@@ -381,7 +549,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
381 container_of(work, struct iscsi_cls_session, 549 container_of(work, struct iscsi_cls_session,
382 unblock_work); 550 unblock_work);
383 struct Scsi_Host *shost = iscsi_session_to_shost(session); 551 struct Scsi_Host *shost = iscsi_session_to_shost(session);
384 struct iscsi_host *ihost = shost->shost_data; 552 struct iscsi_cls_host *ihost = shost->shost_data;
385 unsigned long flags; 553 unsigned long flags;
386 554
387 /* 555 /*
@@ -449,15 +617,19 @@ static void __iscsi_unbind_session(struct work_struct *work)
 		container_of(work, struct iscsi_cls_session,
 			     unbind_work);
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
+	unsigned long flags;
 
 	/* Prevent new scans and make sure scanning is not in progress */
 	mutex_lock(&ihost->mutex);
-	if (list_empty(&session->host_list)) {
+	spin_lock_irqsave(&session->lock, flags);
+	if (session->target_id == ISCSI_MAX_TARGET) {
+		spin_unlock_irqrestore(&session->lock, flags);
 		mutex_unlock(&ihost->mutex);
 		return;
 	}
-	list_del_init(&session->host_list);
+	session->target_id = ISCSI_MAX_TARGET;
+	spin_unlock_irqrestore(&session->lock, flags);
 	mutex_unlock(&ihost->mutex);
 
 	scsi_remove_target(&session->dev);
@@ -467,18 +639,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
 static int iscsi_unbind_session(struct iscsi_cls_session *session)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 
 	return queue_work(ihost->scan_workq, &session->unbind_work);
 }
 
 struct iscsi_cls_session *
-iscsi_alloc_session(struct Scsi_Host *shost,
-		    struct iscsi_transport *transport)
+iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+		    int dd_size)
 {
 	struct iscsi_cls_session *session;
 
-	session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+	session = kzalloc(sizeof(*session) + dd_size,
 			  GFP_KERNEL);
 	if (!session)
 		return NULL;
@@ -487,7 +659,6 @@ iscsi_alloc_session(struct Scsi_Host *shost,
 	session->recovery_tmo = 120;
 	session->state = ISCSI_SESSION_FREE;
 	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
-	INIT_LIST_HEAD(&session->host_list);
 	INIT_LIST_HEAD(&session->sess_list);
 	INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
 	INIT_WORK(&session->block_work, __iscsi_block_session);
@@ -500,22 +671,57 @@ iscsi_alloc_session(struct Scsi_Host *shost,
 	session->dev.parent = &shost->shost_gendev;
 	session->dev.release = iscsi_session_release;
 	device_initialize(&session->dev);
-	if (transport->sessiondata_size)
+	if (dd_size)
 		session->dd_data = &session[1];
 	return session;
 }
 EXPORT_SYMBOL_GPL(iscsi_alloc_session);
 
+static int iscsi_get_next_target_id(struct device *dev, void *data)
+{
+	struct iscsi_cls_session *session;
+	unsigned long flags;
+	int err = 0;
+
+	if (!iscsi_is_session_dev(dev))
+		return 0;
+
+	session = iscsi_dev_to_session(dev);
+	spin_lock_irqsave(&session->lock, flags);
+	if (*((unsigned int *) data) == session->target_id)
+		err = -EEXIST;
+	spin_unlock_irqrestore(&session->lock, flags);
+	return err;
+}
+
 int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost;
+	struct iscsi_cls_host *ihost;
 	unsigned long flags;
+	unsigned int id = target_id;
 	int err;
 
 	ihost = shost->shost_data;
 	session->sid = atomic_add_return(1, &iscsi_session_nr);
-	session->target_id = target_id;
+
+	if (id == ISCSI_MAX_TARGET) {
+		for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+			err = device_for_each_child(&shost->shost_gendev, &id,
+						    iscsi_get_next_target_id);
+			if (!err)
+				break;
+		}
+
+		if (id == ISCSI_MAX_TARGET) {
+			iscsi_cls_session_printk(KERN_ERR, session,
+						 "Too many iscsi targets. Max "
+						 "number of targets is %d.\n",
+						 ISCSI_MAX_TARGET - 1);
+			goto release_host;
+		}
+	}
+	session->target_id = id;
 
 	snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
 		 session->sid);
@@ -531,10 +737,6 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 	list_add(&session->sess_list, &sesslist);
 	spin_unlock_irqrestore(&sesslock, flags);
 
-	mutex_lock(&ihost->mutex);
-	list_add(&session->host_list, &ihost->sessions);
-	mutex_unlock(&ihost->mutex);
-
 	iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
 	return 0;
 
@@ -548,18 +750,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
  * iscsi_create_session - create iscsi class session
  * @shost: scsi host
  * @transport: iscsi transport
+ * @dd_size: private driver data size
  * @target_id: which target
  *
  * This can be called from a LLD or iscsi_transport.
  */
 struct iscsi_cls_session *
-iscsi_create_session(struct Scsi_Host *shost,
-		     struct iscsi_transport *transport,
-		     unsigned int target_id)
+iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+		     int dd_size, unsigned int target_id)
 {
 	struct iscsi_cls_session *session;
 
-	session = iscsi_alloc_session(shost, transport);
+	session = iscsi_alloc_session(shost, transport, dd_size);
 	if (!session)
 		return NULL;
 
@@ -595,7 +797,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
 void iscsi_remove_session(struct iscsi_cls_session *session)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 	int err;
 
@@ -661,6 +863,7 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
 /**
  * iscsi_create_conn - create iscsi class connection
  * @session: iscsi cls session
+ * @dd_size: private driver data size
  * @cid: connection id
 *
  * This can be called from a LLD or iscsi_transport. The connection
@@ -673,18 +876,17 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
  * non-zero.
  */
 struct iscsi_cls_conn *
-iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
 {
 	struct iscsi_transport *transport = session->transport;
 	struct iscsi_cls_conn *conn;
 	unsigned long flags;
 	int err;
 
-	conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+	conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
 	if (!conn)
 		return NULL;
-
-	if (transport->conndata_size)
+	if (dd_size)
 		conn->dd_data = &conn[1];
 
 	INIT_LIST_HEAD(&conn->conn_list);
@@ -1017,21 +1219,20 @@ int iscsi_session_event(struct iscsi_cls_session *session,
 EXPORT_SYMBOL_GPL(iscsi_session_event);
 
 static int
-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+			struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+			uint16_t cmds_max, uint16_t queue_depth)
 {
 	struct iscsi_transport *transport = priv->iscsi_transport;
 	struct iscsi_cls_session *session;
-	uint32_t hostno;
+	uint32_t host_no;
 
-	session = transport->create_session(transport, &priv->t,
-					    ev->u.c_session.cmds_max,
-					    ev->u.c_session.queue_depth,
-					    ev->u.c_session.initial_cmdsn,
-					    &hostno);
+	session = transport->create_session(ep, cmds_max, queue_depth,
+					    initial_cmdsn, &host_no);
 	if (!session)
 		return -ENOMEM;
 
-	ev->r.c_session_ret.host_no = hostno;
+	ev->r.c_session_ret.host_no = host_no;
 	ev->r.c_session_ret.sid = session->sid;
 	return 0;
 }
@@ -1106,6 +1307,7 @@ static int
 iscsi_if_transport_ep(struct iscsi_transport *transport,
 		      struct iscsi_uevent *ev, int msg_type)
 {
+	struct iscsi_endpoint *ep;
 	struct sockaddr *dst_addr;
 	int rc = 0;
 
@@ -1115,22 +1317,33 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
 			return -EINVAL;
 
 		dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
-		rc = transport->ep_connect(dst_addr,
-					   ev->u.ep_connect.non_blocking,
-					   &ev->r.ep_connect_ret.handle);
+		ep = transport->ep_connect(dst_addr,
+					   ev->u.ep_connect.non_blocking);
+		if (IS_ERR(ep))
+			return PTR_ERR(ep);
+
+		ev->r.ep_connect_ret.handle = ep->id;
 		break;
 	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
 		if (!transport->ep_poll)
 			return -EINVAL;
 
-		ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+		ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+		if (!ep)
+			return -EINVAL;
+
+		ev->r.retcode = transport->ep_poll(ep,
 						   ev->u.ep_poll.timeout_ms);
 		break;
 	case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
 		if (!transport->ep_disconnect)
 			return -EINVAL;
 
-		transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+		ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+		if (!ep)
+			return -EINVAL;
+
+		transport->ep_disconnect(ep);
 		break;
 	}
 	return rc;
@@ -1195,6 +1408,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	struct iscsi_internal *priv;
 	struct iscsi_cls_session *session;
 	struct iscsi_cls_conn *conn;
+	struct iscsi_endpoint *ep = NULL;
 
 	priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
 	if (!priv)
@@ -1208,7 +1422,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
-		err = iscsi_if_create_session(priv, ev);
+		err = iscsi_if_create_session(priv, ep, ev,
+					      ev->u.c_session.initial_cmdsn,
+					      ev->u.c_session.cmds_max,
+					      ev->u.c_session.queue_depth);
+		break;
+	case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+		ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+		if (!ep) {
+			err = -EINVAL;
+			break;
+		}
+
+		err = iscsi_if_create_session(priv, ep, ev,
+					      ev->u.c_bound_session.initial_cmdsn,
+					      ev->u.c_bound_session.cmds_max,
+					      ev->u.c_bound_session.queue_depth);
 		break;
 	case ISCSI_UEVENT_DESTROY_SESSION:
 		session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -1414,6 +1643,8 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
 iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
 iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
 iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
 
 static ssize_t
 show_priv_session_state(struct device *dev, struct device_attribute *attr,
@@ -1580,6 +1811,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	priv->daemon_pid = -1;
 	priv->iscsi_transport = tt;
 	priv->t.user_scan = iscsi_user_scan;
+	if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+		priv->t.create_work_queue = 1;
 
 	priv->dev.class = &iscsi_transport_class;
 	snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
@@ -1595,7 +1828,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
 	priv->t.host_attrs.ac.class = &iscsi_host_class.class;
 	priv->t.host_attrs.ac.match = iscsi_host_match;
-	priv->t.host_size = sizeof(struct iscsi_host);
+	priv->t.host_size = sizeof(struct iscsi_cls_host);
 	transport_container_register(&priv->t.host_attrs);
 
 	SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
@@ -1653,6 +1886,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
 	SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
 	SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+	SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+	SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
 	SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
 	SETUP_PRIV_SESSION_RD_ATTR(state);
 
@@ -1668,6 +1903,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
 
 unregister_dev:
 	device_unregister(&priv->dev);
+	return NULL;
 free_priv:
 	kfree(priv);
 	return NULL;
@@ -1715,10 +1951,14 @@ static __init int iscsi_transport_init(void)
 	if (err)
 		return err;
 
-	err = transport_class_register(&iscsi_host_class);
+	err = class_register(&iscsi_endpoint_class);
 	if (err)
 		goto unregister_transport_class;
 
+	err = transport_class_register(&iscsi_host_class);
+	if (err)
+		goto unregister_endpoint_class;
+
 	err = transport_class_register(&iscsi_connection_class);
 	if (err)
 		goto unregister_host_class;
@@ -1727,8 +1967,8 @@ static __init int iscsi_transport_init(void)
 	if (err)
 		goto unregister_conn_class;
 
-	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
-				    THIS_MODULE);
+	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+				    NULL, THIS_MODULE);
 	if (!nls) {
 		err = -ENOBUFS;
 		goto unregister_session_class;
@@ -1748,6 +1988,8 @@ unregister_conn_class:
 	transport_class_unregister(&iscsi_connection_class);
 unregister_host_class:
 	transport_class_unregister(&iscsi_host_class);
+unregister_endpoint_class:
+	class_unregister(&iscsi_endpoint_class);
 unregister_transport_class:
 	class_unregister(&iscsi_transport_class);
 	return err;
@@ -1760,6 +2002,7 @@ static void __exit iscsi_transport_exit(void)
 	transport_class_unregister(&iscsi_connection_class);
 	transport_class_unregister(&iscsi_session_class);
 	transport_class_unregister(&iscsi_host_class);
+	class_unregister(&iscsi_endpoint_class);
 	class_unregister(&iscsi_transport_class);
 }
 
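The API rework above moves per-session and per-connection private-data sizing out of the transport template fields (sessiondata_size/conndata_size) and into the iscsi_create_session()/iscsi_create_conn() call sites, and iscsi_add_session() now auto-allocates a free target id via device_for_each_child() when the caller passes ISCSI_MAX_TARGET. A minimal caller sketch of the new calling convention follows; "my_transport", "struct my_sess_data" and "struct my_conn_data" are hypothetical LLD names, not part of this patch:

/* Sketch only: hypothetical LLD using the reworked allocation API. */
static struct iscsi_cls_session *my_setup(struct Scsi_Host *shost)
{
	struct iscsi_cls_session *sess;
	struct iscsi_cls_conn *conn;

	/* dd_size is now passed per call instead of via the transport */
	sess = iscsi_create_session(shost, &my_transport,
				    sizeof(struct my_sess_data),
				    ISCSI_MAX_TARGET /* auto-pick target id */);
	if (!sess)
		return NULL;

	conn = iscsi_create_conn(sess, sizeof(struct my_conn_data), 0 /* cid */);
	if (!conn) {
		iscsi_destroy_session(sess);
		return NULL;
	}
	return sess;
}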
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d53312c42547..0c63947d8a9d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -58,8 +58,8 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsicam.h>
-#include <scsi/sd.h>
 
+#include "sd.h"
 #include "scsi_logging.h"
 
 MODULE_AUTHOR("Eric Youngdale");
@@ -295,11 +295,6 @@ static int sd_major(int major_idx)
 	}
 }
 
-static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
-{
-	return container_of(disk->private_data, struct scsi_disk, driver);
-}
-
 static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
 {
 	struct scsi_disk *sdkp = NULL;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
new file mode 100644
index 000000000000..03a3d45cfa42
--- /dev/null
+++ b/drivers/scsi/sd.h
@@ -0,0 +1,62 @@
+#ifndef _SCSI_DISK_H
+#define _SCSI_DISK_H
+
+/*
+ * More than enough for everybody ;)  The huge number of majors
+ * is a leftover from 16bit dev_t days, we don't really need that
+ * much numberspace.
+ */
+#define SD_MAJORS	16
+
+/*
+ * This is limited by the naming scheme enforced in sd_probe,
+ * add another character to it if you really need more disks.
+ */
+#define SD_MAX_DISKS	(((26 * 26) + 26 + 1) * 26)
+
+/*
+ * Time out in seconds for disks and Magneto-opticals (which are slower).
+ */
+#define SD_TIMEOUT		(30 * HZ)
+#define SD_MOD_TIMEOUT		(75 * HZ)
+
+/*
+ * Number of allowed retries
+ */
+#define SD_MAX_RETRIES		5
+#define SD_PASSTHROUGH_RETRIES	1
+
+/*
+ * Size of the initial data buffer for mode and read capacity data
+ */
+#define SD_BUF_SIZE		512
+
+struct scsi_disk {
+	struct scsi_driver *driver;	/* always &sd_template */
+	struct scsi_device *device;
+	struct device	dev;
+	struct gendisk	*disk;
+	unsigned int	openers;	/* protected by BKL for now, yuck */
+	sector_t	capacity;	/* size in 512-byte sectors */
+	u32		index;
+	u8		media_present;
+	u8		write_prot;
+	unsigned	previous_state : 1;
+	unsigned	WCE : 1;	/* state of disk WCE bit */
+	unsigned	RCD : 1;	/* state of disk RCD bit, unused */
+	unsigned	DPOFUA : 1;	/* state of disk DPOFUA bit */
+};
+#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
+
+static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
+{
+	return container_of(disk->private_data, struct scsi_disk, driver);
+}
+
+#define sd_printk(prefix, sdsk, fmt, a...)				\
+	(sdsk)->disk ?							\
+	      sdev_printk(prefix, (sdsk)->device, "[%s] " fmt,		\
+			  (sdsk)->disk->disk_name, ##a) :		\
+	      sdev_printk(prefix, (sdsk)->device, fmt, ##a)
+
+#endif /* _SCSI_DISK_H */
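With the scsi_disk() accessor and sd_printk() now exported through drivers/scsi/sd.h, other sd-family code can map a gendisk back to its scsi_disk and log against the right device. A small illustrative sketch using only what the header above provides; the function name is hypothetical:

/* Sketch only: "sd_report_capacity" is illustrative, not part of the patch. */
static void sd_report_capacity(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);	/* gendisk -> scsi_disk */

	sd_printk(KERN_INFO, sdkp, "%llu 512-byte sectors, WCE=%u\n",
		  (unsigned long long)sdkp->capacity, sdkp->WCE);
}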
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index fccd2e88d600..d3b8ebb83776 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1036,6 +1036,9 @@ sg_ioctl(struct inode *inode, struct file *filp,
 	case SG_SCSI_RESET_DEVICE:
 		val = SCSI_TRY_RESET_DEVICE;
 		break;
+	case SG_SCSI_RESET_TARGET:
+		val = SCSI_TRY_RESET_TARGET;
+		break;
 	case SG_SCSI_RESET_BUS:
 		val = SCSI_TRY_RESET_BUS;
 		break;
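The new case wires target reset into sg's existing SG_SCSI_RESET ioctl. From user space the reset would be requested roughly as below, assuming the matching sg.h update exports SG_SCSI_RESET_TARGET; /dev/sg0 is illustrative:

/* Sketch only: requesting a target reset through the sg character device. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	int val = SG_SCSI_RESET_TARGET;
	int fd = open("/dev/sg0", O_RDWR);

	if (fd < 0 || ioctl(fd, SG_SCSI_RESET, &val) < 0) {
		perror("SG_SCSI_RESET");
		return 1;
	}
	return 0;
}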
diff --git a/drivers/scsi/sym53c8xx_2/sym_misc.h b/drivers/scsi/sym53c8xx_2/sym_misc.h
index 0433d5d0caf3..430537183c18 100644
--- a/drivers/scsi/sym53c8xx_2/sym_misc.h
+++ b/drivers/scsi/sym53c8xx_2/sym_misc.h
@@ -121,9 +121,7 @@ static __inline void sym_que_move(struct sym_quehead *orig,
 	}
 }
 
-#define sym_que_entry(ptr, type, member) \
-	((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member)))
-
+#define sym_que_entry(ptr, type, member) container_of(ptr, type, member)
 
 #define sym_insque(new, pos)	__sym_que_add(new, pos, (pos)->flink)
 
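The rewrite fixes a real bug: the old open-coded macro forced the member offset through an unsigned int cast, which truncates pointer arithmetic on 64-bit, whereas container_of() computes the same offset portably and type-checks the member. A usage sketch follows; "struct my_ccb" is a hypothetical containing structure, and the call to sym_remque_head() assumes the inline dequeue helper defined earlier in this header:

/* Sketch only: recovering the containing structure from a queued element. */
struct my_ccb {
	int tag;
	struct sym_quehead link_ccbq;	/* embedded queue element */
};

static struct my_ccb *first_ccb(struct sym_quehead *head)
{
	struct sym_quehead *qp = sym_remque_head(head);

	return qp ? sym_que_entry(qp, struct my_ccb, link_ccbq) : NULL;
}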