83 files changed, 4100 insertions, 1205 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe index 50e2a80ea28f..21640eaad371 100644 --- a/Documentation/ABI/testing/sysfs-bus-fcoe +++ b/Documentation/ABI/testing/sysfs-bus-fcoe | |||
@@ -1,14 +1,53 @@ | |||
1 | What: /sys/bus/fcoe/ctlr_X | 1 | What: /sys/bus/fcoe/ |
2 | Date: August 2012 | ||
3 | KernelVersion: TBD | ||
4 | Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org | ||
5 | Description: The FCoE bus. Attributes in this directory are control interfaces. | ||
6 | Attributes: | ||
7 | |||
8 | ctlr_create: 'FCoE Controller' instance creation interface. Writing an | ||
9 | <ifname> to this file will allocate and populate sysfs with a | ||
10 | fcoe_ctlr_device (ctlr_X). The user can then configure any | ||
11 | per-port settings and finally write to the fcoe_ctlr_device's | ||
12 | 'enabled' attribute to begin the kernel's discovery and login | ||
13 | process. | ||
14 | |||
15 | ctlr_destroy: 'FCoE Controller' instance removal interface. Writing a | ||
16 | fcoe_ctlr_device's sysfs name to this file will log the | ||
17 | fcoe_ctlr_device out of the fabric or otherwise connected | ||
18 | FCoE devices. It will also free all kernel memory allocated | ||
19 | for this fcoe_ctlr_device and any structures associated | ||
20 | with it, including the scsi_host. | ||
21 | |||
22 | What: /sys/bus/fcoe/devices/ctlr_X | ||
2 | Date: March 2012 | 23 | Date: March 2012 |
3 | KernelVersion: TBD | 24 | KernelVersion: TBD |
4 | Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org | 25 | Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org |
5 | Description: 'FCoE Controller' instances on the fcoe bus | 26 | Description: 'FCoE Controller' instances on the fcoe bus. |
27 | The FCoE Controller now has a three-stage creation process: | ||
28 | 1) Write the interface name to ctlr_create, 2) configure the FCoE | ||
29 | Controller (ctlr_X), 3) enable the FCoE Controller to begin | ||
30 | discovery and login. The FCoE Controller is destroyed by | ||
31 | writing its name, i.e. ctlr_X, to the ctlr_destroy file. | ||
32 | |||
6 | Attributes: | 33 | Attributes: |
7 | 34 | ||
8 | fcf_dev_loss_tmo: Device loss timeout period (see below). Changing | 35 | fcf_dev_loss_tmo: Device loss timeout period (see below). Changing |
9 | this value will change the dev_loss_tmo for all | 36 | this value will change the dev_loss_tmo for all |
10 | FCFs discovered by this controller. | 37 | FCFs discovered by this controller. |
11 | 38 | ||
39 | mode: Display or change the FCoE Controller's mode. Possible | ||
40 | modes are 'Fabric' and 'VN2VN'. If an FCoE Controller | ||
41 | is started in 'Fabric' mode then FIP FCF discovery is | ||
42 | initiated and ultimately a fabric login is attempted. | ||
43 | If an FCoE Controller is started in 'VN2VN' mode then | ||
44 | FIP VN2VN discovery and login are performed. An FCoE | ||
45 | Controller only supports one mode at a time. | ||
46 | |||
47 | enabled: Whether an FCoE controller is enabled or disabled. | ||
48 | 0 if disabled, 1 if enabled. Writing either 0 or 1 | ||
49 | to this file will enable or disable the FCoE controller. | ||
50 | |||
12 | lesb/link_fail: Link Error Status Block (LESB) link failure count. | 51 | lesb/link_fail: Link Error Status Block (LESB) link failure count. |
13 | 52 | ||
14 | lesb/vlink_fail: Link Error Status Block (LESB) virtual link | 53 | lesb/vlink_fail: Link Error Status Block (LESB) virtual link |
@@ -26,7 +65,7 @@ Attributes: | |||
26 | 65 | ||
27 | Notes: ctlr_X (global increment starting at 0) | 66 | Notes: ctlr_X (global increment starting at 0) |
28 | 67 | ||
29 | What: /sys/bus/fcoe/fcf_X | 68 | What: /sys/bus/fcoe/devices/fcf_X |
30 | Date: March 2012 | 69 | Date: March 2012 |
31 | KernelVersion: TBD | 70 | KernelVersion: TBD |
32 | Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org | 71 | Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org |
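The ABI text above amounts to a three-step sysfs workflow: write an interface name to ctlr_create, configure the resulting ctlr_X device while it is still disabled, then write 1 to its 'enabled' attribute to start discovery and login. Below is a minimal userspace sketch of that sequence; the paths follow the ABI above, while the interface name "eth0" and the instance name "ctlr_0" are illustrative assumptions.

/*
 * Hypothetical userspace helper exercising the fcoe_sysfs control files
 * documented above. "eth0" and "ctlr_0" are assumed example names.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_sysfs(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return (n < 0) ? -1 : 0;
}

int main(void)
{
	/* 1) Create the FCoE Controller on the chosen Ethernet interface. */
	if (write_sysfs("/sys/bus/fcoe/ctlr_create", "eth0"))
		return 1;
	/* 2) Configure it while still disabled, e.g. switch it to VN2VN mode. */
	write_sysfs("/sys/bus/fcoe/devices/ctlr_0/mode", "VN2VN");
	/* 3) Enable it so the kernel begins discovery and login. */
	write_sysfs("/sys/bus/fcoe/devices/ctlr_0/enabled", "1");
	return 0;
}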
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas index da03146c182a..09673c7fc8ee 100644 --- a/Documentation/scsi/ChangeLog.megaraid_sas +++ b/Documentation/scsi/ChangeLog.megaraid_sas | |||
@@ -1,3 +1,12 @@ | |||
1 | Release Date : Sat. Feb 9, 2013 17:00:00 PST 2013 - | ||
2 | (emaild-id:megaraidlinux@lsi.com) | ||
3 | Adam Radford | ||
4 | Current Version : 06.506.00.00-rc1 | ||
5 | Old Version : 06.504.01.00-rc1 | ||
6 | 1. Add 4k FastPath DIF support. | ||
7 | 2. Dont load DevHandle unless FastPath enabled. | ||
8 | 3. Version and Changelog update. | ||
9 | ------------------------------------------------------------------------------- | ||
1 | Release Date : Mon. Oct 1, 2012 17:00:00 PST 2012 - | 10 | Release Date : Mon. Oct 1, 2012 17:00:00 PST 2012 - |
2 | (emaild-id:megaraidlinux@lsi.com) | 11 | (emaild-id:megaraidlinux@lsi.com) |
3 | Adam Radford | 12 | Adam Radford |
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 3b021ec63255..e2e349204e7d 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c | |||
@@ -407,7 +407,7 @@ static int aac_src_deliver_message(struct fib *fib) | |||
407 | fib->hw_fib_va->header.StructType = FIB_MAGIC2; | 407 | fib->hw_fib_va->header.StructType = FIB_MAGIC2; |
408 | fib->hw_fib_va->header.SenderFibAddress = (u32)address; | 408 | fib->hw_fib_va->header.SenderFibAddress = (u32)address; |
409 | fib->hw_fib_va->header.u.TimeStamp = 0; | 409 | fib->hw_fib_va->header.u.TimeStamp = 0; |
410 | BUG_ON((u32)(address >> 32) != 0L); | 410 | BUG_ON(upper_32_bits(address) != 0L); |
411 | address |= fibsize; | 411 | address |= fibsize; |
412 | } else { | 412 | } else { |
413 | /* Calculate the amount to the fibsize bits */ | 413 | /* Calculate the amount to the fibsize bits */ |
@@ -431,7 +431,7 @@ static int aac_src_deliver_message(struct fib *fib) | |||
431 | address |= fibsize; | 431 | address |= fibsize; |
432 | } | 432 | } |
433 | 433 | ||
434 | src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff); | 434 | src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff); |
435 | src_writel(dev, MUnit.IQ_L, address & 0xffffffff); | 435 | src_writel(dev, MUnit.IQ_L, address & 0xffffffff); |
436 | 436 | ||
437 | return 0; | 437 | return 0; |
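The two hunks above replace open-coded ">> 32" arithmetic with the kernel's upper_32_bits() helper when splitting the 64-bit FIB address across the IQ_H/IQ_L register pair. For readers unfamiliar with the idiom, here is a standalone sketch using userspace stand-ins for the helpers (not the kernel definitions):

/*
 * Userspace stand-ins for upper_32_bits()/lower_32_bits(): split a 64-bit
 * value into the high and low halves written to a paired register set.
 * The address below is an arbitrary example, not driver data.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	uint64_t address = 0x0000004512345600ULL;

	printf("IQ_H=0x%08x IQ_L=0x%08x\n", upper_32(address), lower_32(address));
	return 0;
}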
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 6401db494ef5..2daf4b0da434 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -62,6 +62,10 @@ static int bnx2fc_destroy(struct net_device *net_device); | |||
62 | static int bnx2fc_enable(struct net_device *netdev); | 62 | static int bnx2fc_enable(struct net_device *netdev); |
63 | static int bnx2fc_disable(struct net_device *netdev); | 63 | static int bnx2fc_disable(struct net_device *netdev); |
64 | 64 | ||
65 | /* fcoe_sysfs control interface handlers */ | ||
66 | static int bnx2fc_ctlr_alloc(struct net_device *netdev); | ||
67 | static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev); | ||
68 | |||
65 | static void bnx2fc_recv_frame(struct sk_buff *skb); | 69 | static void bnx2fc_recv_frame(struct sk_buff *skb); |
66 | 70 | ||
67 | static void bnx2fc_start_disc(struct bnx2fc_interface *interface); | 71 | static void bnx2fc_start_disc(struct bnx2fc_interface *interface); |
@@ -89,7 +93,6 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport); | |||
89 | static void bnx2fc_stop(struct bnx2fc_interface *interface); | 93 | static void bnx2fc_stop(struct bnx2fc_interface *interface); |
90 | static int __init bnx2fc_mod_init(void); | 94 | static int __init bnx2fc_mod_init(void); |
91 | static void __exit bnx2fc_mod_exit(void); | 95 | static void __exit bnx2fc_mod_exit(void); |
92 | static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev); | ||
93 | 96 | ||
94 | unsigned int bnx2fc_debug_level; | 97 | unsigned int bnx2fc_debug_level; |
95 | module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR); | 98 | module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR); |
@@ -107,44 +110,6 @@ static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport) | |||
107 | ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; | 110 | ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; |
108 | } | 111 | } |
109 | 112 | ||
110 | /** | ||
111 | * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block | ||
112 | * @lport: the local port | ||
113 | * @fc_lesb: the link error status block | ||
114 | */ | ||
115 | static void bnx2fc_get_lesb(struct fc_lport *lport, | ||
116 | struct fc_els_lesb *fc_lesb) | ||
117 | { | ||
118 | struct net_device *netdev = bnx2fc_netdev(lport); | ||
119 | |||
120 | __fcoe_get_lesb(lport, fc_lesb, netdev); | ||
121 | } | ||
122 | |||
123 | static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev) | ||
124 | { | ||
125 | struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev); | ||
126 | struct net_device *netdev = bnx2fc_netdev(fip->lp); | ||
127 | struct fcoe_fc_els_lesb *fcoe_lesb; | ||
128 | struct fc_els_lesb fc_lesb; | ||
129 | |||
130 | __fcoe_get_lesb(fip->lp, &fc_lesb, netdev); | ||
131 | fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb); | ||
132 | |||
133 | ctlr_dev->lesb.lesb_link_fail = | ||
134 | ntohl(fcoe_lesb->lesb_link_fail); | ||
135 | ctlr_dev->lesb.lesb_vlink_fail = | ||
136 | ntohl(fcoe_lesb->lesb_vlink_fail); | ||
137 | ctlr_dev->lesb.lesb_miss_fka = | ||
138 | ntohl(fcoe_lesb->lesb_miss_fka); | ||
139 | ctlr_dev->lesb.lesb_symb_err = | ||
140 | ntohl(fcoe_lesb->lesb_symb_err); | ||
141 | ctlr_dev->lesb.lesb_err_block = | ||
142 | ntohl(fcoe_lesb->lesb_err_block); | ||
143 | ctlr_dev->lesb.lesb_fcs_error = | ||
144 | ntohl(fcoe_lesb->lesb_fcs_error); | ||
145 | } | ||
146 | EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb); | ||
147 | |||
148 | static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev) | 113 | static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev) |
149 | { | 114 | { |
150 | struct fcoe_ctlr_device *ctlr_dev = | 115 | struct fcoe_ctlr_device *ctlr_dev = |
@@ -741,35 +706,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) | |||
741 | return 0; | 706 | return 0; |
742 | } | 707 | } |
743 | 708 | ||
744 | static void bnx2fc_link_speed_update(struct fc_lport *lport) | ||
745 | { | ||
746 | struct fcoe_port *port = lport_priv(lport); | ||
747 | struct bnx2fc_interface *interface = port->priv; | ||
748 | struct net_device *netdev = interface->netdev; | ||
749 | struct ethtool_cmd ecmd; | ||
750 | |||
751 | if (!__ethtool_get_settings(netdev, &ecmd)) { | ||
752 | lport->link_supported_speeds &= | ||
753 | ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); | ||
754 | if (ecmd.supported & (SUPPORTED_1000baseT_Half | | ||
755 | SUPPORTED_1000baseT_Full)) | ||
756 | lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; | ||
757 | if (ecmd.supported & SUPPORTED_10000baseT_Full) | ||
758 | lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; | ||
759 | |||
760 | switch (ethtool_cmd_speed(&ecmd)) { | ||
761 | case SPEED_1000: | ||
762 | lport->link_speed = FC_PORTSPEED_1GBIT; | ||
763 | break; | ||
764 | case SPEED_2500: | ||
765 | lport->link_speed = FC_PORTSPEED_2GBIT; | ||
766 | break; | ||
767 | case SPEED_10000: | ||
768 | lport->link_speed = FC_PORTSPEED_10GBIT; | ||
769 | break; | ||
770 | } | ||
771 | } | ||
772 | } | ||
773 | static int bnx2fc_link_ok(struct fc_lport *lport) | 709 | static int bnx2fc_link_ok(struct fc_lport *lport) |
774 | { | 710 | { |
775 | struct fcoe_port *port = lport_priv(lport); | 711 | struct fcoe_port *port = lport_priv(lport); |
@@ -827,7 +763,7 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev) | |||
827 | port->fcoe_pending_queue_active = 0; | 763 | port->fcoe_pending_queue_active = 0; |
828 | setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport); | 764 | setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport); |
829 | 765 | ||
830 | bnx2fc_link_speed_update(lport); | 766 | fcoe_link_speed_update(lport); |
831 | 767 | ||
832 | if (!lport->vport) { | 768 | if (!lport->vport) { |
833 | if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) | 769 | if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) |
@@ -871,6 +807,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, | |||
871 | u16 vlan_id) | 807 | u16 vlan_id) |
872 | { | 808 | { |
873 | struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; | 809 | struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; |
810 | struct fcoe_ctlr_device *cdev; | ||
874 | struct fc_lport *lport; | 811 | struct fc_lport *lport; |
875 | struct fc_lport *vport; | 812 | struct fc_lport *vport; |
876 | struct bnx2fc_interface *interface, *tmp; | 813 | struct bnx2fc_interface *interface, *tmp; |
@@ -930,30 +867,47 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, | |||
930 | BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n", | 867 | BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n", |
931 | interface->netdev->name, event); | 868 | interface->netdev->name, event); |
932 | 869 | ||
933 | bnx2fc_link_speed_update(lport); | 870 | fcoe_link_speed_update(lport); |
871 | |||
872 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
934 | 873 | ||
935 | if (link_possible && !bnx2fc_link_ok(lport)) { | 874 | if (link_possible && !bnx2fc_link_ok(lport)) { |
936 | /* Reset max recv frame size to default */ | 875 | switch (cdev->enabled) { |
937 | fc_set_mfs(lport, BNX2FC_MFS); | 876 | case FCOE_CTLR_DISABLED: |
938 | /* | 877 | pr_info("Link up while interface is disabled.\n"); |
939 | * ctlr link up will only be handled during | 878 | break; |
940 | * enable to avoid sending discovery solicitation | 879 | case FCOE_CTLR_ENABLED: |
941 | * on a stale vlan | 880 | case FCOE_CTLR_UNUSED: |
942 | */ | 881 | /* Reset max recv frame size to default */ |
943 | if (interface->enabled) | 882 | fc_set_mfs(lport, BNX2FC_MFS); |
944 | fcoe_ctlr_link_up(ctlr); | 883 | /* |
884 | * ctlr link up will only be handled during | ||
885 | * enable to avoid sending discovery | ||
886 | * solicitation on a stale vlan | ||
887 | */ | ||
888 | if (interface->enabled) | ||
889 | fcoe_ctlr_link_up(ctlr); | ||
890 | }; | ||
945 | } else if (fcoe_ctlr_link_down(ctlr)) { | 891 | } else if (fcoe_ctlr_link_down(ctlr)) { |
946 | mutex_lock(&lport->lp_mutex); | 892 | switch (cdev->enabled) { |
947 | list_for_each_entry(vport, &lport->vports, list) | 893 | case FCOE_CTLR_DISABLED: |
948 | fc_host_port_type(vport->host) = | 894 | pr_info("Link down while interface is disabled.\n"); |
949 | FC_PORTTYPE_UNKNOWN; | 895 | break; |
950 | mutex_unlock(&lport->lp_mutex); | 896 | case FCOE_CTLR_ENABLED: |
951 | fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; | 897 | case FCOE_CTLR_UNUSED: |
952 | per_cpu_ptr(lport->stats, | 898 | mutex_lock(&lport->lp_mutex); |
953 | get_cpu())->LinkFailureCount++; | 899 | list_for_each_entry(vport, &lport->vports, list) |
954 | put_cpu(); | 900 | fc_host_port_type(vport->host) = |
955 | fcoe_clean_pending_queue(lport); | 901 | FC_PORTTYPE_UNKNOWN; |
956 | wait_for_upload = 1; | 902 | mutex_unlock(&lport->lp_mutex); |
903 | fc_host_port_type(lport->host) = | ||
904 | FC_PORTTYPE_UNKNOWN; | ||
905 | per_cpu_ptr(lport->stats, | ||
906 | get_cpu())->LinkFailureCount++; | ||
907 | put_cpu(); | ||
908 | fcoe_clean_pending_queue(lport); | ||
909 | wait_for_upload = 1; | ||
910 | }; | ||
957 | } | 911 | } |
958 | } | 912 | } |
959 | mutex_unlock(&bnx2fc_dev_lock); | 913 | mutex_unlock(&bnx2fc_dev_lock); |
@@ -1484,6 +1438,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, | |||
1484 | port = lport_priv(lport); | 1438 | port = lport_priv(lport); |
1485 | port->lport = lport; | 1439 | port->lport = lport; |
1486 | port->priv = interface; | 1440 | port->priv = interface; |
1441 | port->get_netdev = bnx2fc_netdev; | ||
1487 | INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); | 1442 | INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); |
1488 | 1443 | ||
1489 | /* Configure fcoe_port */ | 1444 | /* Configure fcoe_port */ |
@@ -2003,7 +1958,9 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev) | |||
2003 | set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); | 1958 | set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); |
2004 | } | 1959 | } |
2005 | 1960 | ||
2006 | 1961 | /** | |
1962 | * Deprecated: Use bnx2fc_ctlr_enabled() | ||
1963 | */ | ||
2007 | static int bnx2fc_disable(struct net_device *netdev) | 1964 | static int bnx2fc_disable(struct net_device *netdev) |
2008 | { | 1965 | { |
2009 | struct bnx2fc_interface *interface; | 1966 | struct bnx2fc_interface *interface; |
@@ -2029,7 +1986,9 @@ static int bnx2fc_disable(struct net_device *netdev) | |||
2029 | return rc; | 1986 | return rc; |
2030 | } | 1987 | } |
2031 | 1988 | ||
2032 | 1989 | /** | |
1990 | * Deprecated: Use bnx2fc_ctlr_enabled() | ||
1991 | */ | ||
2033 | static int bnx2fc_enable(struct net_device *netdev) | 1992 | static int bnx2fc_enable(struct net_device *netdev) |
2034 | { | 1993 | { |
2035 | struct bnx2fc_interface *interface; | 1994 | struct bnx2fc_interface *interface; |
@@ -2055,17 +2014,57 @@ static int bnx2fc_enable(struct net_device *netdev) | |||
2055 | } | 2014 | } |
2056 | 2015 | ||
2057 | /** | 2016 | /** |
2058 | * bnx2fc_create - Create bnx2fc FCoE interface | 2017 | * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller |
2018 | * @cdev: The FCoE Controller that is being enabled or disabled | ||
2019 | * | ||
2020 | * fcoe_sysfs will ensure that the state of 'enabled' has | ||
2021 | * changed, so no checking is necessary here. This routine simply | ||
2022 | * calls fcoe_enable or fcoe_disable, both of which are deprecated. | ||
2023 | * When those routines are removed the functionality can be merged | ||
2024 | * here. | ||
2025 | */ | ||
2026 | static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev) | ||
2027 | { | ||
2028 | struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); | ||
2029 | struct fc_lport *lport = ctlr->lp; | ||
2030 | struct net_device *netdev = bnx2fc_netdev(lport); | ||
2031 | |||
2032 | switch (cdev->enabled) { | ||
2033 | case FCOE_CTLR_ENABLED: | ||
2034 | return bnx2fc_enable(netdev); | ||
2035 | case FCOE_CTLR_DISABLED: | ||
2036 | return bnx2fc_disable(netdev); | ||
2037 | case FCOE_CTLR_UNUSED: | ||
2038 | default: | ||
2039 | return -ENOTSUPP; | ||
2040 | }; | ||
2041 | } | ||
2042 | |||
2043 | enum bnx2fc_create_link_state { | ||
2044 | BNX2FC_CREATE_LINK_DOWN, | ||
2045 | BNX2FC_CREATE_LINK_UP, | ||
2046 | }; | ||
2047 | |||
2048 | /** | ||
2049 | * _bnx2fc_create() - Create bnx2fc FCoE interface | ||
2050 | * @netdev : The net_device object the Ethernet interface to create on | ||
2051 | * @fip_mode: The FIP mode for this creation | ||
2052 | * @link_state: The ctlr link state on creation | ||
2059 | * | 2053 | * |
2060 | * @buffer: The name of Ethernet interface to create on | 2054 | * Called from either the libfcoe 'create' module parameter |
2061 | * @kp: The associated kernel param | 2055 | * via fcoe_create or from fcoe_sysfs's ctlr_create file. |
2062 | * | 2056 | * |
2063 | * Called from sysfs. | 2057 | * libfcoe's 'create' module parameter is deprecated so some |
2058 | * consolidation of code can be done when that interface is | ||
2059 | * removed. | ||
2064 | * | 2060 | * |
2065 | * Returns: 0 for success | 2061 | * Returns: 0 for success |
2066 | */ | 2062 | */ |
2067 | static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) | 2063 | static int _bnx2fc_create(struct net_device *netdev, |
2064 | enum fip_state fip_mode, | ||
2065 | enum bnx2fc_create_link_state link_state) | ||
2068 | { | 2066 | { |
2067 | struct fcoe_ctlr_device *cdev; | ||
2069 | struct fcoe_ctlr *ctlr; | 2068 | struct fcoe_ctlr *ctlr; |
2070 | struct bnx2fc_interface *interface; | 2069 | struct bnx2fc_interface *interface; |
2071 | struct bnx2fc_hba *hba; | 2070 | struct bnx2fc_hba *hba; |
@@ -2160,7 +2159,15 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) | |||
2160 | /* Make this master N_port */ | 2159 | /* Make this master N_port */ |
2161 | ctlr->lp = lport; | 2160 | ctlr->lp = lport; |
2162 | 2161 | ||
2163 | if (!bnx2fc_link_ok(lport)) { | 2162 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); |
2163 | |||
2164 | if (link_state == BNX2FC_CREATE_LINK_UP) | ||
2165 | cdev->enabled = FCOE_CTLR_ENABLED; | ||
2166 | else | ||
2167 | cdev->enabled = FCOE_CTLR_DISABLED; | ||
2168 | |||
2169 | if (link_state == BNX2FC_CREATE_LINK_UP && | ||
2170 | !bnx2fc_link_ok(lport)) { | ||
2164 | fcoe_ctlr_link_up(ctlr); | 2171 | fcoe_ctlr_link_up(ctlr); |
2165 | fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; | 2172 | fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; |
2166 | set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); | 2173 | set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); |
@@ -2168,7 +2175,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) | |||
2168 | 2175 | ||
2169 | BNX2FC_HBA_DBG(lport, "create: START DISC\n"); | 2176 | BNX2FC_HBA_DBG(lport, "create: START DISC\n"); |
2170 | bnx2fc_start_disc(interface); | 2177 | bnx2fc_start_disc(interface); |
2171 | interface->enabled = true; | 2178 | |
2179 | if (link_state == BNX2FC_CREATE_LINK_UP) | ||
2180 | interface->enabled = true; | ||
2181 | |||
2172 | /* | 2182 | /* |
2173 | * Release from kref_init in bnx2fc_interface_setup, on success | 2183 | * Release from kref_init in bnx2fc_interface_setup, on success |
2174 | * lport should be holding a reference taken in bnx2fc_if_create | 2184 | * lport should be holding a reference taken in bnx2fc_if_create |
@@ -2194,6 +2204,37 @@ mod_err: | |||
2194 | } | 2204 | } |
2195 | 2205 | ||
2196 | /** | 2206 | /** |
2207 | * bnx2fc_create() - Create a bnx2fc interface | ||
2208 | * @netdev : The net_device object the Ethernet interface to create on | ||
2209 | * @fip_mode: The FIP mode for this creation | ||
2210 | * | ||
2211 | * Called from fcoe transport | ||
2212 | * | ||
2213 | * Returns: 0 for success | ||
2214 | */ | ||
2215 | static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) | ||
2216 | { | ||
2217 | return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP); | ||
2218 | } | ||
2219 | |||
2220 | /** | ||
2221 | * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs | ||
2222 | * @netdev: The net_device to be used by the allocated FCoE Controller | ||
2223 | * | ||
2224 | * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr | ||
2225 | * in a link_down state. This allows the user an opportunity to configure | ||
2226 | * the FCoE Controller from sysfs before enabling the FCoE Controller. | ||
2227 | * | ||
2228 | * Creating with this routine starts the FCoE Controller in Fabric | ||
2229 | * mode. The user can change to VN2VN or another mode before enabling. | ||
2230 | */ | ||
2231 | static int bnx2fc_ctlr_alloc(struct net_device *netdev) | ||
2232 | { | ||
2233 | return _bnx2fc_create(netdev, FIP_MODE_FABRIC, | ||
2234 | BNX2FC_CREATE_LINK_DOWN); | ||
2235 | } | ||
2236 | |||
2237 | /** | ||
2197 | * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance | 2238 | * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance |
2198 | * | 2239 | * |
2199 | * @cnic: Pointer to cnic device instance | 2240 | * @cnic: Pointer to cnic device instance |
@@ -2318,6 +2359,7 @@ static struct fcoe_transport bnx2fc_transport = { | |||
2318 | .name = {"bnx2fc"}, | 2359 | .name = {"bnx2fc"}, |
2319 | .attached = false, | 2360 | .attached = false, |
2320 | .list = LIST_HEAD_INIT(bnx2fc_transport.list), | 2361 | .list = LIST_HEAD_INIT(bnx2fc_transport.list), |
2362 | .alloc = bnx2fc_ctlr_alloc, | ||
2321 | .match = bnx2fc_match, | 2363 | .match = bnx2fc_match, |
2322 | .create = bnx2fc_create, | 2364 | .create = bnx2fc_create, |
2323 | .destroy = bnx2fc_destroy, | 2365 | .destroy = bnx2fc_destroy, |
@@ -2562,13 +2604,13 @@ module_init(bnx2fc_mod_init); | |||
2562 | module_exit(bnx2fc_mod_exit); | 2604 | module_exit(bnx2fc_mod_exit); |
2563 | 2605 | ||
2564 | static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = { | 2606 | static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = { |
2565 | .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode, | 2607 | .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled, |
2566 | .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb, | 2608 | .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, |
2567 | .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb, | 2609 | .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, |
2568 | .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb, | 2610 | .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb, |
2569 | .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb, | 2611 | .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb, |
2570 | .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb, | 2612 | .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb, |
2571 | .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb, | 2613 | .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb, |
2572 | 2614 | ||
2573 | .get_fcoe_fcf_selected = fcoe_fcf_get_selected, | 2615 | .get_fcoe_fcf_selected = fcoe_fcf_get_selected, |
2574 | .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id, | 2616 | .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id, |
@@ -2675,7 +2717,7 @@ static struct libfc_function_template bnx2fc_libfc_fcn_templ = { | |||
2675 | .elsct_send = bnx2fc_elsct_send, | 2717 | .elsct_send = bnx2fc_elsct_send, |
2676 | .fcp_abort_io = bnx2fc_abort_io, | 2718 | .fcp_abort_io = bnx2fc_abort_io, |
2677 | .fcp_cleanup = bnx2fc_cleanup, | 2719 | .fcp_cleanup = bnx2fc_cleanup, |
2678 | .get_lesb = bnx2fc_get_lesb, | 2720 | .get_lesb = fcoe_get_lesb, |
2679 | .rport_event_callback = bnx2fc_rport_event_handler, | 2721 | .rport_event_callback = bnx2fc_rport_event_handler, |
2680 | }; | 2722 | }; |
2681 | 2723 | ||
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 865c64fa923c..fed486bfd3f4 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c | |||
@@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, | |||
3747 | dcb->max_command = 1; | 3747 | dcb->max_command = 1; |
3748 | dcb->target_id = target; | 3748 | dcb->target_id = target; |
3749 | dcb->target_lun = lun; | 3749 | dcb->target_lun = lun; |
3750 | dcb->dev_mode = eeprom->target[target].cfg0; | ||
3750 | #ifndef DC395x_NO_DISCONNECT | 3751 | #ifndef DC395x_NO_DISCONNECT |
3751 | dcb->identify_msg = | 3752 | dcb->identify_msg = |
3752 | IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun); | 3753 | IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun); |
3753 | #else | 3754 | #else |
3754 | dcb->identify_msg = IDENTIFY(0, lun); | 3755 | dcb->identify_msg = IDENTIFY(0, lun); |
3755 | #endif | 3756 | #endif |
3756 | dcb->dev_mode = eeprom->target[target].cfg0; | ||
3757 | dcb->inquiry7 = 0; | 3757 | dcb->inquiry7 = 0; |
3758 | dcb->sync_mode = 0; | 3758 | dcb->sync_mode = 0; |
3759 | dcb->min_nego_period = clock_period[period_index]; | 3759 | dcb->min_nego_period = clock_period[period_index]; |
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 666b7ac4475f..b5d92fc93c70 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -82,11 +82,11 @@ static int fcoe_rcv(struct sk_buff *, struct net_device *, | |||
82 | struct packet_type *, struct net_device *); | 82 | struct packet_type *, struct net_device *); |
83 | static int fcoe_percpu_receive_thread(void *); | 83 | static int fcoe_percpu_receive_thread(void *); |
84 | static void fcoe_percpu_clean(struct fc_lport *); | 84 | static void fcoe_percpu_clean(struct fc_lport *); |
85 | static int fcoe_link_speed_update(struct fc_lport *); | ||
86 | static int fcoe_link_ok(struct fc_lport *); | 85 | static int fcoe_link_ok(struct fc_lport *); |
87 | 86 | ||
88 | static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); | 87 | static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); |
89 | static int fcoe_hostlist_add(const struct fc_lport *); | 88 | static int fcoe_hostlist_add(const struct fc_lport *); |
89 | static void fcoe_hostlist_del(const struct fc_lport *); | ||
90 | 90 | ||
91 | static int fcoe_device_notification(struct notifier_block *, ulong, void *); | 91 | static int fcoe_device_notification(struct notifier_block *, ulong, void *); |
92 | static void fcoe_dev_setup(void); | 92 | static void fcoe_dev_setup(void); |
@@ -117,6 +117,11 @@ static int fcoe_destroy(struct net_device *netdev); | |||
117 | static int fcoe_enable(struct net_device *netdev); | 117 | static int fcoe_enable(struct net_device *netdev); |
118 | static int fcoe_disable(struct net_device *netdev); | 118 | static int fcoe_disable(struct net_device *netdev); |
119 | 119 | ||
120 | /* fcoe_sysfs control interface handlers */ | ||
121 | static int fcoe_ctlr_alloc(struct net_device *netdev); | ||
122 | static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev); | ||
123 | |||
124 | |||
120 | static struct fc_seq *fcoe_elsct_send(struct fc_lport *, | 125 | static struct fc_seq *fcoe_elsct_send(struct fc_lport *, |
121 | u32 did, struct fc_frame *, | 126 | u32 did, struct fc_frame *, |
122 | unsigned int op, | 127 | unsigned int op, |
@@ -126,8 +131,6 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *, | |||
126 | void *, u32 timeout); | 131 | void *, u32 timeout); |
127 | static void fcoe_recv_frame(struct sk_buff *skb); | 132 | static void fcoe_recv_frame(struct sk_buff *skb); |
128 | 133 | ||
129 | static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); | ||
130 | |||
131 | /* notification function for packets from net device */ | 134 | /* notification function for packets from net device */ |
132 | static struct notifier_block fcoe_notifier = { | 135 | static struct notifier_block fcoe_notifier = { |
133 | .notifier_call = fcoe_device_notification, | 136 | .notifier_call = fcoe_device_notification, |
@@ -151,11 +154,11 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled); | |||
151 | static int fcoe_vport_disable(struct fc_vport *, bool disable); | 154 | static int fcoe_vport_disable(struct fc_vport *, bool disable); |
152 | static void fcoe_set_vport_symbolic_name(struct fc_vport *); | 155 | static void fcoe_set_vport_symbolic_name(struct fc_vport *); |
153 | static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); | 156 | static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); |
154 | static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *); | ||
155 | static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *); | 157 | static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *); |
156 | 158 | ||
157 | static struct fcoe_sysfs_function_template fcoe_sysfs_templ = { | 159 | static struct fcoe_sysfs_function_template fcoe_sysfs_templ = { |
158 | .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode, | 160 | .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode, |
161 | .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled, | ||
159 | .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, | 162 | .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, |
160 | .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, | 163 | .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, |
161 | .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb, | 164 | .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb, |
@@ -1112,10 +1115,17 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, | |||
1112 | port = lport_priv(lport); | 1115 | port = lport_priv(lport); |
1113 | port->lport = lport; | 1116 | port->lport = lport; |
1114 | port->priv = fcoe; | 1117 | port->priv = fcoe; |
1118 | port->get_netdev = fcoe_netdev; | ||
1115 | port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH; | 1119 | port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH; |
1116 | port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH; | 1120 | port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH; |
1117 | INIT_WORK(&port->destroy_work, fcoe_destroy_work); | 1121 | INIT_WORK(&port->destroy_work, fcoe_destroy_work); |
1118 | 1122 | ||
1123 | /* | ||
1124 | * Need to add the lport to the hostlist | ||
1125 | * so we catch NETDEV_CHANGE events. | ||
1126 | */ | ||
1127 | fcoe_hostlist_add(lport); | ||
1128 | |||
1119 | /* configure a fc_lport including the exchange manager */ | 1129 | /* configure a fc_lport including the exchange manager */ |
1120 | rc = fcoe_lport_config(lport); | 1130 | rc = fcoe_lport_config(lport); |
1121 | if (rc) { | 1131 | if (rc) { |
@@ -1187,6 +1197,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, | |||
1187 | out_lp_destroy: | 1197 | out_lp_destroy: |
1188 | fc_exch_mgr_free(lport); | 1198 | fc_exch_mgr_free(lport); |
1189 | out_host_put: | 1199 | out_host_put: |
1200 | fcoe_hostlist_del(lport); | ||
1190 | scsi_host_put(lport->host); | 1201 | scsi_host_put(lport->host); |
1191 | out: | 1202 | out: |
1192 | return ERR_PTR(rc); | 1203 | return ERR_PTR(rc); |
@@ -1964,6 +1975,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier, | |||
1964 | static int fcoe_device_notification(struct notifier_block *notifier, | 1975 | static int fcoe_device_notification(struct notifier_block *notifier, |
1965 | ulong event, void *ptr) | 1976 | ulong event, void *ptr) |
1966 | { | 1977 | { |
1978 | struct fcoe_ctlr_device *cdev; | ||
1967 | struct fc_lport *lport = NULL; | 1979 | struct fc_lport *lport = NULL; |
1968 | struct net_device *netdev = ptr; | 1980 | struct net_device *netdev = ptr; |
1969 | struct fcoe_ctlr *ctlr; | 1981 | struct fcoe_ctlr *ctlr; |
@@ -2020,13 +2032,29 @@ static int fcoe_device_notification(struct notifier_block *notifier, | |||
2020 | 2032 | ||
2021 | fcoe_link_speed_update(lport); | 2033 | fcoe_link_speed_update(lport); |
2022 | 2034 | ||
2023 | if (link_possible && !fcoe_link_ok(lport)) | 2035 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); |
2024 | fcoe_ctlr_link_up(ctlr); | 2036 | |
2025 | else if (fcoe_ctlr_link_down(ctlr)) { | 2037 | if (link_possible && !fcoe_link_ok(lport)) { |
2026 | stats = per_cpu_ptr(lport->stats, get_cpu()); | 2038 | switch (cdev->enabled) { |
2027 | stats->LinkFailureCount++; | 2039 | case FCOE_CTLR_DISABLED: |
2028 | put_cpu(); | 2040 | pr_info("Link up while interface is disabled.\n"); |
2029 | fcoe_clean_pending_queue(lport); | 2041 | break; |
2042 | case FCOE_CTLR_ENABLED: | ||
2043 | case FCOE_CTLR_UNUSED: | ||
2044 | fcoe_ctlr_link_up(ctlr); | ||
2045 | }; | ||
2046 | } else if (fcoe_ctlr_link_down(ctlr)) { | ||
2047 | switch (cdev->enabled) { | ||
2048 | case FCOE_CTLR_DISABLED: | ||
2049 | pr_info("Link down while interface is disabled.\n"); | ||
2050 | break; | ||
2051 | case FCOE_CTLR_ENABLED: | ||
2052 | case FCOE_CTLR_UNUSED: | ||
2053 | stats = per_cpu_ptr(lport->stats, get_cpu()); | ||
2054 | stats->LinkFailureCount++; | ||
2055 | put_cpu(); | ||
2056 | fcoe_clean_pending_queue(lport); | ||
2057 | }; | ||
2030 | } | 2058 | } |
2031 | out: | 2059 | out: |
2032 | return rc; | 2060 | return rc; |
@@ -2039,6 +2067,8 @@ out: | |||
2039 | * Called from fcoe transport. | 2067 | * Called from fcoe transport. |
2040 | * | 2068 | * |
2041 | * Returns: 0 for success | 2069 | * Returns: 0 for success |
2070 | * | ||
2071 | * Deprecated: use fcoe_ctlr_enabled() | ||
2042 | */ | 2072 | */ |
2043 | static int fcoe_disable(struct net_device *netdev) | 2073 | static int fcoe_disable(struct net_device *netdev) |
2044 | { | 2074 | { |
@@ -2098,6 +2128,33 @@ out: | |||
2098 | } | 2128 | } |
2099 | 2129 | ||
2100 | /** | 2130 | /** |
2131 | * fcoe_ctlr_enabled() - Enable or disable an FCoE Controller | ||
2132 | * @cdev: The FCoE Controller that is being enabled or disabled | ||
2133 | * | ||
2134 | * fcoe_sysfs will ensure that the state of 'enabled' has | ||
2135 | * changed, so no checking is necessary here. This routine simply | ||
2136 | * calls fcoe_enable or fcoe_disable, both of which are deprecated. | ||
2137 | * When those routines are removed the functionality can be merged | ||
2138 | * here. | ||
2139 | */ | ||
2140 | static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev) | ||
2141 | { | ||
2142 | struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); | ||
2143 | struct fc_lport *lport = ctlr->lp; | ||
2144 | struct net_device *netdev = fcoe_netdev(lport); | ||
2145 | |||
2146 | switch (cdev->enabled) { | ||
2147 | case FCOE_CTLR_ENABLED: | ||
2148 | return fcoe_enable(netdev); | ||
2149 | case FCOE_CTLR_DISABLED: | ||
2150 | return fcoe_disable(netdev); | ||
2151 | case FCOE_CTLR_UNUSED: | ||
2152 | default: | ||
2153 | return -ENOTSUPP; | ||
2154 | }; | ||
2155 | } | ||
2156 | |||
2157 | /** | ||
2101 | * fcoe_destroy() - Destroy a FCoE interface | 2158 | * fcoe_destroy() - Destroy a FCoE interface |
2102 | * @netdev : The net_device object the Ethernet interface to create on | 2159 | * @netdev : The net_device object the Ethernet interface to create on |
2103 | * | 2160 | * |
@@ -2139,8 +2196,31 @@ static void fcoe_destroy_work(struct work_struct *work) | |||
2139 | { | 2196 | { |
2140 | struct fcoe_port *port; | 2197 | struct fcoe_port *port; |
2141 | struct fcoe_interface *fcoe; | 2198 | struct fcoe_interface *fcoe; |
2199 | struct Scsi_Host *shost; | ||
2200 | struct fc_host_attrs *fc_host; | ||
2201 | unsigned long flags; | ||
2202 | struct fc_vport *vport; | ||
2203 | struct fc_vport *next_vport; | ||
2142 | 2204 | ||
2143 | port = container_of(work, struct fcoe_port, destroy_work); | 2205 | port = container_of(work, struct fcoe_port, destroy_work); |
2206 | shost = port->lport->host; | ||
2207 | fc_host = shost_to_fc_host(shost); | ||
2208 | |||
2209 | /* Loop through all the vports and mark them for deletion */ | ||
2210 | spin_lock_irqsave(shost->host_lock, flags); | ||
2211 | list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) { | ||
2212 | if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) { | ||
2213 | continue; | ||
2214 | } else { | ||
2215 | vport->flags |= FC_VPORT_DELETING; | ||
2216 | queue_work(fc_host_work_q(shost), | ||
2217 | &vport->vport_delete_work); | ||
2218 | } | ||
2219 | } | ||
2220 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
2221 | |||
2222 | flush_workqueue(fc_host_work_q(shost)); | ||
2223 | |||
2144 | mutex_lock(&fcoe_config_mutex); | 2224 | mutex_lock(&fcoe_config_mutex); |
2145 | 2225 | ||
2146 | fcoe = port->priv; | 2226 | fcoe = port->priv; |
@@ -2204,16 +2284,26 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe) | |||
2204 | #endif | 2284 | #endif |
2205 | } | 2285 | } |
2206 | 2286 | ||
2287 | enum fcoe_create_link_state { | ||
2288 | FCOE_CREATE_LINK_DOWN, | ||
2289 | FCOE_CREATE_LINK_UP, | ||
2290 | }; | ||
2291 | |||
2207 | /** | 2292 | /** |
2208 | * fcoe_create() - Create a fcoe interface | 2293 | * _fcoe_create() - (internal) Create a fcoe interface |
2209 | * @netdev : The net_device object the Ethernet interface to create on | 2294 | * @netdev : The net_device object the Ethernet interface to create on |
2210 | * @fip_mode: The FIP mode for this creation | 2295 | * @fip_mode: The FIP mode for this creation |
2296 | * @link_state: The ctlr link state on creation | ||
2211 | * | 2297 | * |
2212 | * Called from fcoe transport | 2298 | * Called from either the libfcoe 'create' module parameter |
2299 | * via fcoe_create or from fcoe_syfs's ctlr_create file. | ||
2213 | * | 2300 | * |
2214 | * Returns: 0 for success | 2301 | * libfcoe's 'create' module parameter is deprecated so some |
2302 | * consolidation of code can be done when that interface is | ||
2303 | * removed. | ||
2215 | */ | 2304 | */ |
2216 | static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) | 2305 | static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, |
2306 | enum fcoe_create_link_state link_state) | ||
2217 | { | 2307 | { |
2218 | int rc = 0; | 2308 | int rc = 0; |
2219 | struct fcoe_ctlr_device *ctlr_dev; | 2309 | struct fcoe_ctlr_device *ctlr_dev; |
@@ -2254,13 +2344,29 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) | |||
2254 | /* setup DCB priority attributes. */ | 2344 | /* setup DCB priority attributes. */ |
2255 | fcoe_dcb_create(fcoe); | 2345 | fcoe_dcb_create(fcoe); |
2256 | 2346 | ||
2257 | /* add to lports list */ | ||
2258 | fcoe_hostlist_add(lport); | ||
2259 | |||
2260 | /* start FIP Discovery and FLOGI */ | 2347 | /* start FIP Discovery and FLOGI */ |
2261 | lport->boot_time = jiffies; | 2348 | lport->boot_time = jiffies; |
2262 | fc_fabric_login(lport); | 2349 | fc_fabric_login(lport); |
2263 | if (!fcoe_link_ok(lport)) { | 2350 | |
2351 | /* | ||
2352 | * If the fcoe_ctlr_device is to be set to DISABLED | ||
2353 | * it must be done after the lport is added to the | ||
2354 | * hostlist, but before the rtnl_lock is released. | ||
2355 | * This is because the rtnl_lock protects the | ||
2356 | * hostlist that fcoe_device_notification uses. If | ||
2357 | * the FCoE Controller is intended to be created | ||
2358 | * DISABLED then 'enabled' needs to be considered when | ||
2359 | * handling link events. 'enabled' must be set | ||
2360 | * before the lport can be found in the hostlist | ||
2361 | * when a link up event is received. | ||
2362 | */ | ||
2363 | if (link_state == FCOE_CREATE_LINK_UP) | ||
2364 | ctlr_dev->enabled = FCOE_CTLR_ENABLED; | ||
2365 | else | ||
2366 | ctlr_dev->enabled = FCOE_CTLR_DISABLED; | ||
2367 | |||
2368 | if (link_state == FCOE_CREATE_LINK_UP && | ||
2369 | !fcoe_link_ok(lport)) { | ||
2264 | rtnl_unlock(); | 2370 | rtnl_unlock(); |
2265 | fcoe_ctlr_link_up(ctlr); | 2371 | fcoe_ctlr_link_up(ctlr); |
2266 | mutex_unlock(&fcoe_config_mutex); | 2372 | mutex_unlock(&fcoe_config_mutex); |
@@ -2275,37 +2381,34 @@ out_nortnl: | |||
2275 | } | 2381 | } |
2276 | 2382 | ||
2277 | /** | 2383 | /** |
2278 | * fcoe_link_speed_update() - Update the supported and actual link speeds | 2384 | * fcoe_create() - Create a fcoe interface |
2279 | * @lport: The local port to update speeds for | 2385 | * @netdev : The net_device object the Ethernet interface to create on |
2386 | * @fip_mode: The FIP mode for this creation | ||
2387 | * | ||
2388 | * Called from fcoe transport | ||
2389 | * | ||
2390 | * Returns: 0 for success | ||
2391 | */ | ||
2392 | static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) | ||
2393 | { | ||
2394 | return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP); | ||
2395 | } | ||
2396 | |||
2397 | /** | ||
2398 | * fcoe_ctlr_alloc() - Allocate a fcoe interface from fcoe_sysfs | ||
2399 | * @netdev: The net_device to be used by the allocated FCoE Controller | ||
2280 | * | 2400 | * |
2281 | * Returns: 0 if the ethtool query was successful | 2401 | * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr |
2282 | * -1 if the ethtool query failed | 2402 | * in a link_down state. This allows the user an opportunity to configure |
2403 | * the FCoE Controller from sysfs before enabling the FCoE Controller. | ||
2404 | * | ||
2405 | * Creating with this routine starts the FCoE Controller in Fabric | ||
2406 | * mode. The user can change to VN2VN or another mode before enabling. | ||
2283 | */ | 2407 | */ |
2284 | static int fcoe_link_speed_update(struct fc_lport *lport) | 2408 | static int fcoe_ctlr_alloc(struct net_device *netdev) |
2285 | { | 2409 | { |
2286 | struct net_device *netdev = fcoe_netdev(lport); | 2410 | return _fcoe_create(netdev, FIP_MODE_FABRIC, |
2287 | struct ethtool_cmd ecmd; | 2411 | FCOE_CREATE_LINK_DOWN); |
2288 | |||
2289 | if (!__ethtool_get_settings(netdev, &ecmd)) { | ||
2290 | lport->link_supported_speeds &= | ||
2291 | ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); | ||
2292 | if (ecmd.supported & (SUPPORTED_1000baseT_Half | | ||
2293 | SUPPORTED_1000baseT_Full)) | ||
2294 | lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; | ||
2295 | if (ecmd.supported & SUPPORTED_10000baseT_Full) | ||
2296 | lport->link_supported_speeds |= | ||
2297 | FC_PORTSPEED_10GBIT; | ||
2298 | switch (ethtool_cmd_speed(&ecmd)) { | ||
2299 | case SPEED_1000: | ||
2300 | lport->link_speed = FC_PORTSPEED_1GBIT; | ||
2301 | break; | ||
2302 | case SPEED_10000: | ||
2303 | lport->link_speed = FC_PORTSPEED_10GBIT; | ||
2304 | break; | ||
2305 | } | ||
2306 | return 0; | ||
2307 | } | ||
2308 | return -1; | ||
2309 | } | 2412 | } |
2310 | 2413 | ||
2311 | /** | 2414 | /** |
@@ -2375,10 +2478,13 @@ static int fcoe_reset(struct Scsi_Host *shost) | |||
2375 | struct fcoe_port *port = lport_priv(lport); | 2478 | struct fcoe_port *port = lport_priv(lport); |
2376 | struct fcoe_interface *fcoe = port->priv; | 2479 | struct fcoe_interface *fcoe = port->priv; |
2377 | struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); | 2480 | struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); |
2481 | struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
2378 | 2482 | ||
2379 | fcoe_ctlr_link_down(ctlr); | 2483 | fcoe_ctlr_link_down(ctlr); |
2380 | fcoe_clean_pending_queue(ctlr->lp); | 2484 | fcoe_clean_pending_queue(ctlr->lp); |
2381 | if (!fcoe_link_ok(ctlr->lp)) | 2485 | |
2486 | if (cdev->enabled != FCOE_CTLR_DISABLED && | ||
2487 | !fcoe_link_ok(ctlr->lp)) | ||
2382 | fcoe_ctlr_link_up(ctlr); | 2488 | fcoe_ctlr_link_up(ctlr); |
2383 | return 0; | 2489 | return 0; |
2384 | } | 2490 | } |
@@ -2445,12 +2551,31 @@ static int fcoe_hostlist_add(const struct fc_lport *lport) | |||
2445 | return 0; | 2551 | return 0; |
2446 | } | 2552 | } |
2447 | 2553 | ||
2554 | /** | ||
2555 | * fcoe_hostlist_del() - Remove the FCoE interface identified by a local | ||
2556 | * port from the hostlist | ||
2557 | * @lport: The local port that identifies the FCoE interface to be removed | ||
2558 | * | ||
2559 | * Locking: must be called with the RTNL mutex held | ||
2560 | * | ||
2561 | */ | ||
2562 | static void fcoe_hostlist_del(const struct fc_lport *lport) | ||
2563 | { | ||
2564 | struct fcoe_interface *fcoe; | ||
2565 | struct fcoe_port *port; | ||
2566 | |||
2567 | port = lport_priv(lport); | ||
2568 | fcoe = port->priv; | ||
2569 | list_del(&fcoe->list); | ||
2570 | return; | ||
2571 | } | ||
2448 | 2572 | ||
2449 | static struct fcoe_transport fcoe_sw_transport = { | 2573 | static struct fcoe_transport fcoe_sw_transport = { |
2450 | .name = {FCOE_TRANSPORT_DEFAULT}, | 2574 | .name = {FCOE_TRANSPORT_DEFAULT}, |
2451 | .attached = false, | 2575 | .attached = false, |
2452 | .list = LIST_HEAD_INIT(fcoe_sw_transport.list), | 2576 | .list = LIST_HEAD_INIT(fcoe_sw_transport.list), |
2453 | .match = fcoe_match, | 2577 | .match = fcoe_match, |
2578 | .alloc = fcoe_ctlr_alloc, | ||
2454 | .create = fcoe_create, | 2579 | .create = fcoe_create, |
2455 | .destroy = fcoe_destroy, | 2580 | .destroy = fcoe_destroy, |
2456 | .enable = fcoe_enable, | 2581 | .enable = fcoe_enable, |
@@ -2534,9 +2659,9 @@ static void __exit fcoe_exit(void) | |||
2534 | /* releases the associated fcoe hosts */ | 2659 | /* releases the associated fcoe hosts */ |
2535 | rtnl_lock(); | 2660 | rtnl_lock(); |
2536 | list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { | 2661 | list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { |
2537 | list_del(&fcoe->list); | ||
2538 | ctlr = fcoe_to_ctlr(fcoe); | 2662 | ctlr = fcoe_to_ctlr(fcoe); |
2539 | port = lport_priv(ctlr->lp); | 2663 | port = lport_priv(ctlr->lp); |
2664 | fcoe_hostlist_del(port->lport); | ||
2540 | queue_work(fcoe_wq, &port->destroy_work); | 2665 | queue_work(fcoe_wq, &port->destroy_work); |
2541 | } | 2666 | } |
2542 | rtnl_unlock(); | 2667 | rtnl_unlock(); |
@@ -2776,43 +2901,6 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport) | |||
2776 | NULL, NULL, 3 * lport->r_a_tov); | 2901 | NULL, NULL, 3 * lport->r_a_tov); |
2777 | } | 2902 | } |
2778 | 2903 | ||
2779 | /** | ||
2780 | * fcoe_get_lesb() - Fill the FCoE Link Error Status Block | ||
2781 | * @lport: the local port | ||
2782 | * @fc_lesb: the link error status block | ||
2783 | */ | ||
2784 | static void fcoe_get_lesb(struct fc_lport *lport, | ||
2785 | struct fc_els_lesb *fc_lesb) | ||
2786 | { | ||
2787 | struct net_device *netdev = fcoe_netdev(lport); | ||
2788 | |||
2789 | __fcoe_get_lesb(lport, fc_lesb, netdev); | ||
2790 | } | ||
2791 | |||
2792 | static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev) | ||
2793 | { | ||
2794 | struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev); | ||
2795 | struct net_device *netdev = fcoe_netdev(fip->lp); | ||
2796 | struct fcoe_fc_els_lesb *fcoe_lesb; | ||
2797 | struct fc_els_lesb fc_lesb; | ||
2798 | |||
2799 | __fcoe_get_lesb(fip->lp, &fc_lesb, netdev); | ||
2800 | fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb); | ||
2801 | |||
2802 | ctlr_dev->lesb.lesb_link_fail = | ||
2803 | ntohl(fcoe_lesb->lesb_link_fail); | ||
2804 | ctlr_dev->lesb.lesb_vlink_fail = | ||
2805 | ntohl(fcoe_lesb->lesb_vlink_fail); | ||
2806 | ctlr_dev->lesb.lesb_miss_fka = | ||
2807 | ntohl(fcoe_lesb->lesb_miss_fka); | ||
2808 | ctlr_dev->lesb.lesb_symb_err = | ||
2809 | ntohl(fcoe_lesb->lesb_symb_err); | ||
2810 | ctlr_dev->lesb.lesb_err_block = | ||
2811 | ntohl(fcoe_lesb->lesb_err_block); | ||
2812 | ctlr_dev->lesb.lesb_fcs_error = | ||
2813 | ntohl(fcoe_lesb->lesb_fcs_error); | ||
2814 | } | ||
2815 | |||
2816 | static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev) | 2904 | static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev) |
2817 | { | 2905 | { |
2818 | struct fcoe_ctlr_device *ctlr_dev = | 2906 | struct fcoe_ctlr_device *ctlr_dev = |
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index b42dc32cb5eb..2b53672bf932 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h | |||
@@ -55,12 +55,12 @@ do { \ | |||
55 | 55 | ||
56 | #define FCOE_DBG(fmt, args...) \ | 56 | #define FCOE_DBG(fmt, args...) \ |
57 | FCOE_CHECK_LOGGING(FCOE_LOGGING, \ | 57 | FCOE_CHECK_LOGGING(FCOE_LOGGING, \ |
58 | printk(KERN_INFO "fcoe: " fmt, ##args);) | 58 | pr_info("fcoe: " fmt, ##args);) |
59 | 59 | ||
60 | #define FCOE_NETDEV_DBG(netdev, fmt, args...) \ | 60 | #define FCOE_NETDEV_DBG(netdev, fmt, args...) \ |
61 | FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \ | 61 | FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \ |
62 | printk(KERN_INFO "fcoe: %s: " fmt, \ | 62 | pr_info("fcoe: %s: " fmt, \ |
63 | netdev->name, ##args);) | 63 | netdev->name, ##args);) |
64 | 64 | ||
65 | /** | 65 | /** |
66 | * struct fcoe_interface - A FCoE interface | 66 | * struct fcoe_interface - A FCoE interface |
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 4a909d7cfde1..08c3bc398da2 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c | |||
@@ -1291,8 +1291,16 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | |||
1291 | 1291 | ||
1292 | LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); | 1292 | LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); |
1293 | 1293 | ||
1294 | if (!fcf || !lport->port_id) | 1294 | if (!fcf || !lport->port_id) { |
1295 | /* | ||
1296 | * We have not yet selected the best FCF, but we got a CVL in the | ||
1297 | * meantime. Reset the ctlr and let it rediscover the FCF. | ||
1298 | */ | ||
1299 | mutex_lock(&fip->ctlr_mutex); | ||
1300 | fcoe_ctlr_reset(fip); | ||
1301 | mutex_unlock(&fip->ctlr_mutex); | ||
1295 | return; | 1302 | return; |
1303 | } | ||
1296 | 1304 | ||
1297 | /* | 1305 | /* |
1298 | * mask of required descriptors. Validating each one clears its bit. | 1306 | * mask of required descriptors. Validating each one clears its bit. |
@@ -1551,15 +1559,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) | |||
1551 | fcf->fabric_name, fcf->vfid, fcf->fcf_mac, | 1559 | fcf->fabric_name, fcf->vfid, fcf->fcf_mac, |
1552 | fcf->fc_map, fcoe_ctlr_mtu_valid(fcf), | 1560 | fcf->fc_map, fcoe_ctlr_mtu_valid(fcf), |
1553 | fcf->flogi_sent, fcf->pri); | 1561 | fcf->flogi_sent, fcf->pri); |
1554 | if (fcf->fabric_name != first->fabric_name || | ||
1555 | fcf->vfid != first->vfid || | ||
1556 | fcf->fc_map != first->fc_map) { | ||
1557 | LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " | ||
1558 | "or FC-MAP\n"); | ||
1559 | return NULL; | ||
1560 | } | ||
1561 | if (fcf->flogi_sent) | ||
1562 | continue; | ||
1563 | if (!fcoe_ctlr_fcf_usable(fcf)) { | 1562 | if (!fcoe_ctlr_fcf_usable(fcf)) { |
1564 | LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx " | 1563 | LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx " |
1565 | "map %x %svalid %savailable\n", | 1564 | "map %x %svalid %savailable\n", |
@@ -1569,6 +1568,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) | |||
1569 | "" : "un"); | 1568 | "" : "un"); |
1570 | continue; | 1569 | continue; |
1571 | } | 1570 | } |
1571 | if (fcf->fabric_name != first->fabric_name || | ||
1572 | fcf->vfid != first->vfid || | ||
1573 | fcf->fc_map != first->fc_map) { | ||
1574 | LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " | ||
1575 | "or FC-MAP\n"); | ||
1576 | return NULL; | ||
1577 | } | ||
1578 | if (fcf->flogi_sent) | ||
1579 | continue; | ||
1572 | if (!best || fcf->pri < best->pri || best->flogi_sent) | 1580 | if (!best || fcf->pri < best->pri || best->flogi_sent) |
1573 | best = fcf; | 1581 | best = fcf; |
1574 | } | 1582 | } |
@@ -2864,22 +2872,21 @@ void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev) | |||
2864 | } | 2872 | } |
2865 | EXPORT_SYMBOL(fcoe_fcf_get_selected); | 2873 | EXPORT_SYMBOL(fcoe_fcf_get_selected); |
2866 | 2874 | ||
2867 | void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev) | 2875 | void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) |
2868 | { | 2876 | { |
2869 | struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); | 2877 | struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); |
2870 | 2878 | ||
2871 | mutex_lock(&ctlr->ctlr_mutex); | 2879 | mutex_lock(&ctlr->ctlr_mutex); |
2872 | switch (ctlr->mode) { | 2880 | switch (ctlr_dev->mode) { |
2873 | case FIP_MODE_FABRIC: | 2881 | case FIP_CONN_TYPE_VN2VN: |
2874 | ctlr_dev->mode = FIP_CONN_TYPE_FABRIC; | 2882 | ctlr->mode = FIP_MODE_VN2VN; |
2875 | break; | ||
2876 | case FIP_MODE_VN2VN: | ||
2877 | ctlr_dev->mode = FIP_CONN_TYPE_VN2VN; | ||
2878 | break; | 2883 | break; |
2884 | case FIP_CONN_TYPE_FABRIC: | ||
2879 | default: | 2885 | default: |
2880 | ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN; | 2886 | ctlr->mode = FIP_MODE_FABRIC; |
2881 | break; | 2887 | break; |
2882 | } | 2888 | } |
2889 | |||
2883 | mutex_unlock(&ctlr->ctlr_mutex); | 2890 | mutex_unlock(&ctlr->ctlr_mutex); |
2884 | } | 2891 | } |
2885 | EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode); | 2892 | EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); |
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 5e751689a089..8c05ae017f5b 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c | |||
@@ -21,8 +21,17 @@ | |||
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/etherdevice.h> | 23 | #include <linux/etherdevice.h> |
24 | #include <linux/ctype.h> | ||
24 | 25 | ||
25 | #include <scsi/fcoe_sysfs.h> | 26 | #include <scsi/fcoe_sysfs.h> |
27 | #include <scsi/libfcoe.h> | ||
28 | |||
29 | /* | ||
30 | * OK to include local libfcoe.h for debug_logging, but cannot include | ||
31 | * <scsi/libfcoe.h>, otherwise non-netdev based fcoe solutions would | ||
32 | * have to include more than fcoe_sysfs.h. | ||
33 | */ | ||
34 | #include "libfcoe.h" | ||
26 | 35 | ||
27 | static atomic_t ctlr_num; | 36 | static atomic_t ctlr_num; |
28 | static atomic_t fcf_num; | 37 | static atomic_t fcf_num; |
@@ -71,6 +80,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo, | |||
71 | ((x)->lesb.lesb_err_block) | 80 | ((x)->lesb.lesb_err_block) |
72 | #define fcoe_ctlr_fcs_error(x) \ | 81 | #define fcoe_ctlr_fcs_error(x) \ |
73 | ((x)->lesb.lesb_fcs_error) | 82 | ((x)->lesb.lesb_fcs_error) |
83 | #define fcoe_ctlr_enabled(x) \ | ||
84 | ((x)->enabled) | ||
74 | #define fcoe_fcf_state(x) \ | 85 | #define fcoe_fcf_state(x) \ |
75 | ((x)->state) | 86 | ((x)->state) |
76 | #define fcoe_fcf_fabric_name(x) \ | 87 | #define fcoe_fcf_fabric_name(x) \ |
@@ -210,25 +221,34 @@ static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \ | |||
210 | #define fcoe_enum_name_search(title, table_type, table) \ | 221 | #define fcoe_enum_name_search(title, table_type, table) \ |
211 | static const char *get_fcoe_##title##_name(enum table_type table_key) \ | 222 | static const char *get_fcoe_##title##_name(enum table_type table_key) \ |
212 | { \ | 223 | { \ |
213 | int i; \ | 224 | if (table_key < 0 || table_key >= ARRAY_SIZE(table)) \ |
214 | char *name = NULL; \ | 225 | return NULL; \ |
215 | \ | 226 | return table[table_key]; \ |
216 | for (i = 0; i < ARRAY_SIZE(table); i++) { \ | 227 | } |
217 | if (table[i].value == table_key) { \ | 228 | |
218 | name = table[i].name; \ | 229 | static char *fip_conn_type_names[] = { |
219 | break; \ | 230 | [ FIP_CONN_TYPE_UNKNOWN ] = "Unknown", |
220 | } \ | 231 | [ FIP_CONN_TYPE_FABRIC ] = "Fabric", |
221 | } \ | 232 | [ FIP_CONN_TYPE_VN2VN ] = "VN2VN", |
222 | return name; \ | 233 | }; |
234 | fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names) | ||
235 | |||
236 | static enum fip_conn_type fcoe_parse_mode(const char *buf) | ||
237 | { | ||
238 | int i; | ||
239 | |||
240 | for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) { | ||
241 | if (strcasecmp(buf, fip_conn_type_names[i]) == 0) | ||
242 | return i; | ||
243 | } | ||
244 | |||
245 | return FIP_CONN_TYPE_UNKNOWN; | ||
223 | } | 246 | } |
224 | 247 | ||
225 | static struct { | 248 | static char *fcf_state_names[] = { |
226 | enum fcf_state value; | 249 | [ FCOE_FCF_STATE_UNKNOWN ] = "Unknown", |
227 | char *name; | 250 | [ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected", |
228 | } fcf_state_names[] = { | 251 | [ FCOE_FCF_STATE_CONNECTED ] = "Connected", |
229 | { FCOE_FCF_STATE_UNKNOWN, "Unknown" }, | ||
230 | { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" }, | ||
231 | { FCOE_FCF_STATE_CONNECTED, "Connected" }, | ||
232 | }; | 252 | }; |
233 | fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names) | 253 | fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names) |
234 | #define FCOE_FCF_STATE_MAX_NAMELEN 50 | 254 | #define FCOE_FCF_STATE_MAX_NAMELEN 50 |
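The rework above replaces the {value, name} pair tables and their linear search with designated-initializer arrays indexed directly by the enum value, so the generated lookup becomes a bounds check plus an array access. A hand expansion of the macro for the connection-type table, written out here for illustration only:

/* Approximate expansion of
 * fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names) */
static const char *get_fcoe_ctlr_mode_name(enum fip_conn_type table_key)
{
	if (table_key < 0 || table_key >= ARRAY_SIZE(fip_conn_type_names))
		return NULL;
	return fip_conn_type_names[table_key];
}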
@@ -246,17 +266,7 @@ static ssize_t show_fcf_state(struct device *dev, | |||
246 | } | 266 | } |
247 | static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL); | 267 | static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL); |
248 | 268 | ||
249 | static struct { | 269 | #define FCOE_MAX_MODENAME_LEN 20 |
250 | enum fip_conn_type value; | ||
251 | char *name; | ||
252 | } fip_conn_type_names[] = { | ||
253 | { FIP_CONN_TYPE_UNKNOWN, "Unknown" }, | ||
254 | { FIP_CONN_TYPE_FABRIC, "Fabric" }, | ||
255 | { FIP_CONN_TYPE_VN2VN, "VN2VN" }, | ||
256 | }; | ||
257 | fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names) | ||
258 | #define FCOE_CTLR_MODE_MAX_NAMELEN 50 | ||
259 | |||
260 | static ssize_t show_ctlr_mode(struct device *dev, | 270 | static ssize_t show_ctlr_mode(struct device *dev, |
261 | struct device_attribute *attr, | 271 | struct device_attribute *attr, |
262 | char *buf) | 272 | char *buf) |
@@ -264,17 +274,116 @@ static ssize_t show_ctlr_mode(struct device *dev, | |||
264 | struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); | 274 | struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); |
265 | const char *name; | 275 | const char *name; |
266 | 276 | ||
267 | if (ctlr->f->get_fcoe_ctlr_mode) | ||
268 | ctlr->f->get_fcoe_ctlr_mode(ctlr); | ||
269 | |||
270 | name = get_fcoe_ctlr_mode_name(ctlr->mode); | 277 | name = get_fcoe_ctlr_mode_name(ctlr->mode); |
271 | if (!name) | 278 | if (!name) |
272 | return -EINVAL; | 279 | return -EINVAL; |
273 | return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN, | 280 | return snprintf(buf, FCOE_MAX_MODENAME_LEN, |
281 | "%s\n", name); | ||
282 | } | ||
283 | |||
284 | static ssize_t store_ctlr_mode(struct device *dev, | ||
285 | struct device_attribute *attr, | ||
286 | const char *buf, size_t count) | ||
287 | { | ||
288 | struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); | ||
289 | char mode[FCOE_MAX_MODENAME_LEN + 1]; | ||
290 | |||
291 | if (count > FCOE_MAX_MODENAME_LEN) | ||
292 | return -EINVAL; | ||
293 | |||
294 | strncpy(mode, buf, count); | ||
295 | |||
296 | if (mode[count - 1] == '\n') | ||
297 | mode[count - 1] = '\0'; | ||
298 | else | ||
299 | mode[count] = '\0'; | ||
300 | |||
301 | switch (ctlr->enabled) { | ||
302 | case FCOE_CTLR_ENABLED: | ||
303 | LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled."); | ||
304 | return -EBUSY; | ||
305 | case FCOE_CTLR_DISABLED: | ||
306 | if (!ctlr->f->set_fcoe_ctlr_mode) { | ||
307 | LIBFCOE_SYSFS_DBG(ctlr, | ||
308 | "Mode change not supported by LLD."); | ||
309 | return -ENOTSUPP; | ||
310 | } | ||
311 | |||
312 | ctlr->mode = fcoe_parse_mode(mode); | ||
313 | if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) { | ||
314 | LIBFCOE_SYSFS_DBG(ctlr, | ||
315 | "Unknown mode %s provided.", buf); | ||
316 | return -EINVAL; | ||
317 | } | ||
318 | |||
319 | ctlr->f->set_fcoe_ctlr_mode(ctlr); | ||
320 | LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf); | ||
321 | |||
322 | return count; | ||
323 | case FCOE_CTLR_UNUSED: | ||
324 | default: | ||
325 | LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported."); | ||
326 | return -ENOTSUPP; | ||
327 | }; | ||
328 | } | ||
329 | |||
330 | static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR, | ||
331 | show_ctlr_mode, store_ctlr_mode); | ||
332 | |||
333 | static ssize_t store_ctlr_enabled(struct device *dev, | ||
334 | struct device_attribute *attr, | ||
335 | const char *buf, size_t count) | ||
336 | { | ||
337 | struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); | ||
338 | int rc; | ||
339 | |||
340 | switch (ctlr->enabled) { | ||
341 | case FCOE_CTLR_ENABLED: | ||
342 | if (*buf == '1') | ||
343 | return count; | ||
344 | ctlr->enabled = FCOE_CTLR_DISABLED; | ||
345 | break; | ||
346 | case FCOE_CTLR_DISABLED: | ||
347 | if (*buf == '0') | ||
348 | return count; | ||
349 | ctlr->enabled = FCOE_CTLR_ENABLED; | ||
350 | break; | ||
351 | case FCOE_CTLR_UNUSED: | ||
352 | return -ENOTSUPP; | ||
353 | }; | ||
354 | |||
355 | rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr); | ||
356 | if (rc) | ||
357 | return rc; | ||
358 | |||
359 | return count; | ||
360 | } | ||
361 | |||
362 | static char *ctlr_enabled_state_names[] = { | ||
363 | [ FCOE_CTLR_ENABLED ] = "1", | ||
364 | [ FCOE_CTLR_DISABLED ] = "0", | ||
365 | }; | ||
366 | fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state, | ||
367 | ctlr_enabled_state_names) | ||
368 | #define FCOE_CTLR_ENABLED_MAX_NAMELEN 50 | ||
369 | |||
370 | static ssize_t show_ctlr_enabled_state(struct device *dev, | ||
371 | struct device_attribute *attr, | ||
372 | char *buf) | ||
373 | { | ||
374 | struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); | ||
375 | const char *name; | ||
376 | |||
377 | name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled); | ||
378 | if (!name) | ||
379 | return -EINVAL; | ||
380 | return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN, | ||
274 | "%s\n", name); | 381 | "%s\n", name); |
275 | } | 382 | } |
276 | static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO, | 383 | |
277 | show_ctlr_mode, NULL); | 384 | static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR, |
385 | show_ctlr_enabled_state, | ||
386 | store_ctlr_enabled); | ||
278 | 387 | ||
279 | static ssize_t | 388 | static ssize_t |
280 | store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev, | 389 | store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev, |
@@ -359,6 +468,7 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = { | |||
359 | 468 | ||
360 | static struct attribute *fcoe_ctlr_attrs[] = { | 469 | static struct attribute *fcoe_ctlr_attrs[] = { |
361 | &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr, | 470 | &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr, |
471 | &device_attr_fcoe_ctlr_enabled.attr, | ||
362 | &device_attr_fcoe_ctlr_mode.attr, | 472 | &device_attr_fcoe_ctlr_mode.attr, |
363 | NULL, | 473 | NULL, |
364 | }; | 474 | }; |
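store_ctlr_enabled() above only flips ctlr->enabled between FCOE_CTLR_ENABLED and FCOE_CTLR_DISABLED and then defers to the LLD's set_fcoe_ctlr_enabled() callback. A sketch of what such a callback could look like on the LLD side; the enable/disable primitives are hypothetical, only the enum values and fcoe_ctlr_device_priv() come from this patch's context.

#include <scsi/fcoe_sysfs.h>
#include <scsi/libfcoe.h>

/* Hypothetical LLD enable/disable primitives, assumed for this sketch. */
static int example_enable(struct fcoe_ctlr *ctlr);
static int example_disable(struct fcoe_ctlr *ctlr);

/* Sketch of the callback store_ctlr_enabled() ends up invoking. */
static int example_set_fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
{
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);

	switch (cdev->enabled) {
	case FCOE_CTLR_ENABLED:
		return example_enable(ctlr);
	case FCOE_CTLR_DISABLED:
		return example_disable(ctlr);
	case FCOE_CTLR_UNUSED:
	default:
		return -ENOTSUPP;
	}
}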
@@ -443,9 +553,16 @@ struct device_type fcoe_fcf_device_type = { | |||
443 | .release = fcoe_fcf_device_release, | 553 | .release = fcoe_fcf_device_release, |
444 | }; | 554 | }; |
445 | 555 | ||
556 | struct bus_attribute fcoe_bus_attr_group[] = { | ||
557 | __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store), | ||
558 | __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store), | ||
559 | __ATTR_NULL | ||
560 | }; | ||
561 | |||
446 | struct bus_type fcoe_bus_type = { | 562 | struct bus_type fcoe_bus_type = { |
447 | .name = "fcoe", | 563 | .name = "fcoe", |
448 | .match = &fcoe_bus_match, | 564 | .match = &fcoe_bus_match, |
565 | .bus_attrs = fcoe_bus_attr_group, | ||
449 | }; | 566 | }; |
450 | 567 | ||
451 | /** | 568 | /** |
@@ -566,6 +683,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, | |||
566 | 683 | ||
567 | ctlr->id = atomic_inc_return(&ctlr_num) - 1; | 684 | ctlr->id = atomic_inc_return(&ctlr_num) - 1; |
568 | ctlr->f = f; | 685 | ctlr->f = f; |
686 | ctlr->mode = FIP_CONN_TYPE_FABRIC; | ||
569 | INIT_LIST_HEAD(&ctlr->fcfs); | 687 | INIT_LIST_HEAD(&ctlr->fcfs); |
570 | mutex_init(&ctlr->lock); | 688 | mutex_init(&ctlr->lock); |
571 | ctlr->dev.parent = parent; | 689 | ctlr->dev.parent = parent; |
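With the bus attributes registered in fcoe_bus_type above, the whole life cycle is driven from sysfs: write an interface name to ctlr_create, configure the resulting ctlr_X while store_ctlr_mode() still sees it disabled, then write 1 to its enabled attribute. A small user-space sketch of that sequence; the sysfs paths, the interface name eth2 and the ctlr_0 index are assumptions for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* User-space sketch: drive the new control files added by this patch. */
static void sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	sysfs_write("/sys/bus/fcoe/ctlr_create", "eth2");
	sysfs_write("/sys/bus/fcoe/devices/ctlr_0/mode", "VN2VN");
	sysfs_write("/sys/bus/fcoe/devices/ctlr_0/enabled", "1");
	return 0;
}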
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index ac76d8a042d7..f3a5a53e8631 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c | |||
@@ -83,6 +83,50 @@ static struct notifier_block libfcoe_notifier = { | |||
83 | .notifier_call = libfcoe_device_notification, | 83 | .notifier_call = libfcoe_device_notification, |
84 | }; | 84 | }; |
85 | 85 | ||
86 | /** | ||
87 | * fcoe_link_speed_update() - Update the supported and actual link speeds | ||
88 | * @lport: The local port to update speeds for | ||
89 | * | ||
90 | * Returns: 0 if the ethtool query was successful | ||
91 | * -1 if the ethtool query failed | ||
92 | */ | ||
93 | int fcoe_link_speed_update(struct fc_lport *lport) | ||
94 | { | ||
95 | struct net_device *netdev = fcoe_get_netdev(lport); | ||
96 | struct ethtool_cmd ecmd; | ||
97 | |||
98 | if (!__ethtool_get_settings(netdev, &ecmd)) { | ||
99 | lport->link_supported_speeds &= | ||
100 | ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); | ||
101 | if (ecmd.supported & (SUPPORTED_1000baseT_Half | | ||
102 | SUPPORTED_1000baseT_Full)) | ||
103 | lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; | ||
104 | if (ecmd.supported & SUPPORTED_10000baseT_Full) | ||
105 | lport->link_supported_speeds |= | ||
106 | FC_PORTSPEED_10GBIT; | ||
107 | switch (ethtool_cmd_speed(&ecmd)) { | ||
108 | case SPEED_1000: | ||
109 | lport->link_speed = FC_PORTSPEED_1GBIT; | ||
110 | break; | ||
111 | case SPEED_10000: | ||
112 | lport->link_speed = FC_PORTSPEED_10GBIT; | ||
113 | break; | ||
114 | } | ||
115 | return 0; | ||
116 | } | ||
117 | return -1; | ||
118 | } | ||
119 | EXPORT_SYMBOL_GPL(fcoe_link_speed_update); | ||
120 | |||
121 | /** | ||
122 | * __fcoe_get_lesb() - Get the Link Error Status Block (LESB) for a given lport | ||
123 | * @lport: The local port whose LESB is to be filled | ||
124 | * @fc_lesb: Pointer to the LESB to be filled up | ||
125 | * @netdev: Pointer to the netdev that is associated with the lport | ||
126 | * | ||
127 | * Note, the Link Error Status Block (LESB) for FCoE is defined in FC-BB-6 | ||
128 | * Clause 7.11 in v1.04. | ||
129 | */ | ||
86 | void __fcoe_get_lesb(struct fc_lport *lport, | 130 | void __fcoe_get_lesb(struct fc_lport *lport, |
87 | struct fc_els_lesb *fc_lesb, | 131 | struct fc_els_lesb *fc_lesb, |
88 | struct net_device *netdev) | 132 | struct net_device *netdev) |
@@ -112,6 +156,51 @@ void __fcoe_get_lesb(struct fc_lport *lport, | |||
112 | } | 156 | } |
113 | EXPORT_SYMBOL_GPL(__fcoe_get_lesb); | 157 | EXPORT_SYMBOL_GPL(__fcoe_get_lesb); |
114 | 158 | ||
159 | /** | ||
160 | * fcoe_get_lesb() - Fill the FCoE Link Error Status Block | ||
161 | * @lport: the local port | ||
162 | * @fc_lesb: the link error status block | ||
163 | */ | ||
164 | void fcoe_get_lesb(struct fc_lport *lport, | ||
165 | struct fc_els_lesb *fc_lesb) | ||
166 | { | ||
167 | struct net_device *netdev = fcoe_get_netdev(lport); | ||
168 | |||
169 | __fcoe_get_lesb(lport, fc_lesb, netdev); | ||
170 | } | ||
171 | EXPORT_SYMBOL_GPL(fcoe_get_lesb); | ||
172 | |||
173 | /** | ||
174 | * fcoe_ctlr_get_lesb() - Get the Link Error Status Block (LESB) for a given | ||
175 | * fcoe controller device | ||
176 | * @ctlr_dev: The given fcoe controller device | ||
177 | * | ||
178 | */ | ||
179 | void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev) | ||
180 | { | ||
181 | struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev); | ||
182 | struct net_device *netdev = fcoe_get_netdev(fip->lp); | ||
183 | struct fcoe_fc_els_lesb *fcoe_lesb; | ||
184 | struct fc_els_lesb fc_lesb; | ||
185 | |||
186 | __fcoe_get_lesb(fip->lp, &fc_lesb, netdev); | ||
187 | fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb); | ||
188 | |||
189 | ctlr_dev->lesb.lesb_link_fail = | ||
190 | ntohl(fcoe_lesb->lesb_link_fail); | ||
191 | ctlr_dev->lesb.lesb_vlink_fail = | ||
192 | ntohl(fcoe_lesb->lesb_vlink_fail); | ||
193 | ctlr_dev->lesb.lesb_miss_fka = | ||
194 | ntohl(fcoe_lesb->lesb_miss_fka); | ||
195 | ctlr_dev->lesb.lesb_symb_err = | ||
196 | ntohl(fcoe_lesb->lesb_symb_err); | ||
197 | ctlr_dev->lesb.lesb_err_block = | ||
198 | ntohl(fcoe_lesb->lesb_err_block); | ||
199 | ctlr_dev->lesb.lesb_fcs_error = | ||
200 | ntohl(fcoe_lesb->lesb_fcs_error); | ||
201 | } | ||
202 | EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb); | ||
203 | |||
115 | void fcoe_wwn_to_str(u64 wwn, char *buf, int len) | 204 | void fcoe_wwn_to_str(u64 wwn, char *buf, int len) |
116 | { | 205 | { |
117 | u8 wwpn[8]; | 206 | u8 wwpn[8]; |
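fcoe_get_lesb() and fcoe_ctlr_get_lesb() above, together with fcoe_link_speed_update() from the previous hunk, move boilerplate that each netdev-based LLD used to carry privately into fcoe_transport.c. A sketch of how an LLD might now point at the shared helpers; it assumes the new helpers are declared in <scsi/libfcoe.h>, the libfc .get_lesb template field is existing libfc infrastructure, and the netdev-event handler is hypothetical.

#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/* Sketch: point libfc at the shared LESB helper instead of a private copy. */
static struct libfc_function_template example_libfc_templ = {
	.get_lesb	= fcoe_get_lesb,
	/* other libfc callbacks elided */
};

/* Hypothetical handler invoked on NETDEV_CHANGE for the lport's netdev. */
static void example_netdev_change(struct fc_lport *lport)
{
	if (fcoe_link_speed_update(lport))
		pr_debug("ethtool query failed; keeping previous link speeds\n");
}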
@@ -627,6 +716,110 @@ static int libfcoe_device_notification(struct notifier_block *notifier, | |||
627 | return NOTIFY_OK; | 716 | return NOTIFY_OK; |
628 | } | 717 | } |
629 | 718 | ||
719 | ssize_t fcoe_ctlr_create_store(struct bus_type *bus, | ||
720 | const char *buf, size_t count) | ||
721 | { | ||
722 | struct net_device *netdev = NULL; | ||
723 | struct fcoe_transport *ft = NULL; | ||
724 | struct fcoe_ctlr_device *ctlr_dev = NULL; | ||
725 | int rc = 0; | ||
726 | int err; | ||
727 | |||
728 | mutex_lock(&ft_mutex); | ||
729 | |||
730 | netdev = fcoe_if_to_netdev(buf); | ||
731 | if (!netdev) { | ||
732 | LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buf); | ||
733 | rc = -ENODEV; | ||
734 | goto out_nodev; | ||
735 | } | ||
736 | |||
737 | ft = fcoe_netdev_map_lookup(netdev); | ||
738 | if (ft) { | ||
739 | LIBFCOE_TRANSPORT_DBG("transport %s already has existing " | ||
740 | "FCoE instance on %s.\n", | ||
741 | ft->name, netdev->name); | ||
742 | rc = -EEXIST; | ||
743 | goto out_putdev; | ||
744 | } | ||
745 | |||
746 | ft = fcoe_transport_lookup(netdev); | ||
747 | if (!ft) { | ||
748 | LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", | ||
749 | netdev->name); | ||
750 | rc = -ENODEV; | ||
751 | goto out_putdev; | ||
752 | } | ||
753 | |||
754 | /* pass to transport create */ | ||
755 | err = ft->alloc ? ft->alloc(netdev) : -ENODEV; | ||
756 | if (err) { | ||
757 | fcoe_del_netdev_mapping(netdev); | ||
758 | rc = -ENOMEM; | ||
759 | goto out_putdev; | ||
760 | } | ||
761 | |||
762 | err = fcoe_add_netdev_mapping(netdev, ft); | ||
763 | if (err) { | ||
764 | LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping " | ||
765 | "for FCoE transport %s for %s.\n", | ||
766 | ft->name, netdev->name); | ||
767 | rc = -ENODEV; | ||
768 | goto out_putdev; | ||
769 | } | ||
770 | |||
771 | LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n", | ||
772 | ft->name, (ctlr_dev) ? "succeeded" : "failed", | ||
773 | netdev->name); | ||
774 | |||
775 | out_putdev: | ||
776 | dev_put(netdev); | ||
777 | out_nodev: | ||
778 | mutex_unlock(&ft_mutex); | ||
779 | if (rc) | ||
780 | return rc; | ||
781 | return count; | ||
782 | } | ||
783 | |||
784 | ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus, | ||
785 | const char *buf, size_t count) | ||
786 | { | ||
787 | int rc = -ENODEV; | ||
788 | struct net_device *netdev = NULL; | ||
789 | struct fcoe_transport *ft = NULL; | ||
790 | |||
791 | mutex_lock(&ft_mutex); | ||
792 | |||
793 | netdev = fcoe_if_to_netdev(buf); | ||
794 | if (!netdev) { | ||
795 | LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buf); | ||
796 | goto out_nodev; | ||
797 | } | ||
798 | |||
799 | ft = fcoe_netdev_map_lookup(netdev); | ||
800 | if (!ft) { | ||
801 | LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", | ||
802 | netdev->name); | ||
803 | goto out_putdev; | ||
804 | } | ||
805 | |||
806 | /* pass to transport destroy */ | ||
807 | rc = ft->destroy(netdev); | ||
808 | if (rc) | ||
809 | goto out_putdev; | ||
810 | |||
811 | fcoe_del_netdev_mapping(netdev); | ||
812 | LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n", | ||
813 | ft->name, (rc) ? "failed" : "succeeded", | ||
814 | netdev->name); | ||
815 | rc = count; /* required for successful return */ | ||
816 | out_putdev: | ||
817 | dev_put(netdev); | ||
818 | out_nodev: | ||
819 | mutex_unlock(&ft_mutex); | ||
820 | return rc; | ||
821 | } | ||
822 | EXPORT_SYMBOL(fcoe_ctlr_destroy_store); | ||
630 | 823 | ||
631 | /** | 824 | /** |
632 | * fcoe_transport_create() - Create a fcoe interface | 825 | * fcoe_transport_create() - Create a fcoe interface |
@@ -769,11 +962,7 @@ out_putdev: | |||
769 | dev_put(netdev); | 962 | dev_put(netdev); |
770 | out_nodev: | 963 | out_nodev: |
771 | mutex_unlock(&ft_mutex); | 964 | mutex_unlock(&ft_mutex); |
772 | 965 | return rc; | |
773 | if (rc == -ERESTARTSYS) | ||
774 | return restart_syscall(); | ||
775 | else | ||
776 | return rc; | ||
777 | } | 966 | } |
778 | 967 | ||
779 | /** | 968 | /** |
diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h index 6af5fc3a17d8..d3bb16d11401 100644 --- a/drivers/scsi/fcoe/libfcoe.h +++ b/drivers/scsi/fcoe/libfcoe.h | |||
@@ -2,9 +2,10 @@ | |||
2 | #define _FCOE_LIBFCOE_H_ | 2 | #define _FCOE_LIBFCOE_H_ |
3 | 3 | ||
4 | extern unsigned int libfcoe_debug_logging; | 4 | extern unsigned int libfcoe_debug_logging; |
5 | #define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ | 5 | #define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ |
6 | #define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ | 6 | #define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ |
7 | #define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */ | 7 | #define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */ |
8 | #define LIBFCOE_SYSFS_LOGGING 0x08 /* fcoe_sysfs logging */ | ||
8 | 9 | ||
9 | #define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ | 10 | #define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ |
10 | do { \ | 11 | do { \ |
@@ -16,16 +17,19 @@ do { \ | |||
16 | 17 | ||
17 | #define LIBFCOE_DBG(fmt, args...) \ | 18 | #define LIBFCOE_DBG(fmt, args...) \ |
18 | LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ | 19 | LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ |
19 | printk(KERN_INFO "libfcoe: " fmt, ##args);) | 20 | pr_info("libfcoe: " fmt, ##args);) |
20 | 21 | ||
21 | #define LIBFCOE_FIP_DBG(fip, fmt, args...) \ | 22 | #define LIBFCOE_FIP_DBG(fip, fmt, args...) \ |
22 | LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ | 23 | LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ |
23 | printk(KERN_INFO "host%d: fip: " fmt, \ | 24 | pr_info("host%d: fip: " fmt, \ |
24 | (fip)->lp->host->host_no, ##args);) | 25 | (fip)->lp->host->host_no, ##args);) |
25 | 26 | ||
26 | #define LIBFCOE_TRANSPORT_DBG(fmt, args...) \ | 27 | #define LIBFCOE_TRANSPORT_DBG(fmt, args...) \ |
27 | LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \ | 28 | LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \ |
28 | printk(KERN_INFO "%s: " fmt, \ | 29 | pr_info("%s: " fmt, __func__, ##args);) |
29 | __func__, ##args);) | 30 | |
31 | #define LIBFCOE_SYSFS_DBG(cdev, fmt, args...) \ | ||
32 | LIBFCOE_CHECK_LOGGING(LIBFCOE_SYSFS_LOGGING, \ | ||
33 | pr_info("ctlr_%d: " fmt, cdev->id, ##args);) | ||
30 | 34 | ||
31 | #endif /* _FCOE_LIBFCOE_H_ */ | 35 | #endif /* _FCOE_LIBFCOE_H_ */ |
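The new LIBFCOE_SYSFS_LOGGING class (bit 0x08) gives fcoe_sysfs.c its own debug stream through LIBFCOE_SYSFS_DBG(). A sketch of the macro in use; the surrounding function is illustrative, and enabling the output at runtime through the module's debug_logging parameter is an assumption about the existing libfcoe parameter name.

#include <scsi/fcoe_sysfs.h>
#include "libfcoe.h"	/* local header shown above */

/* Illustrative only: emit a sysfs-class debug message once bit 0x08 of
 * libfcoe_debug_logging is set (e.g. via the assumed debug_logging param). */
static void example_log_enabled_change(struct fcoe_ctlr_device *ctlr_dev)
{
	LIBFCOE_SYSFS_DBG(ctlr_dev, "enabled state is now %d.",
			  ctlr_dev->enabled);
}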
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile index 37c3440bc17c..383598fadf04 100644 --- a/drivers/scsi/fnic/Makefile +++ b/drivers/scsi/fnic/Makefile | |||
@@ -7,6 +7,8 @@ fnic-y := \ | |||
7 | fnic_res.o \ | 7 | fnic_res.o \ |
8 | fnic_fcs.o \ | 8 | fnic_fcs.o \ |
9 | fnic_scsi.o \ | 9 | fnic_scsi.o \ |
10 | fnic_trace.o \ | ||
11 | fnic_debugfs.o \ | ||
10 | vnic_cq.o \ | 12 | vnic_cq.o \ |
11 | vnic_dev.o \ | 13 | vnic_dev.o \ |
12 | vnic_intr.o \ | 14 | vnic_intr.o \ |
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 95a5ba29320d..98436c363035 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <scsi/libfcoe.h> | 26 | #include <scsi/libfcoe.h> |
27 | #include "fnic_io.h" | 27 | #include "fnic_io.h" |
28 | #include "fnic_res.h" | 28 | #include "fnic_res.h" |
29 | #include "fnic_trace.h" | ||
29 | #include "vnic_dev.h" | 30 | #include "vnic_dev.h" |
30 | #include "vnic_wq.h" | 31 | #include "vnic_wq.h" |
31 | #include "vnic_rq.h" | 32 | #include "vnic_rq.h" |
@@ -56,6 +57,34 @@ | |||
56 | #define FNIC_NO_TAG -1 | 57 | #define FNIC_NO_TAG -1 |
57 | 58 | ||
58 | /* | 59 | /* |
60 | * Command flags to identify the type of command and for other future | ||
61 | * use. | ||
62 | */ | ||
63 | #define FNIC_NO_FLAGS 0 | ||
64 | #define FNIC_IO_INITIALIZED BIT(0) | ||
65 | #define FNIC_IO_ISSUED BIT(1) | ||
66 | #define FNIC_IO_DONE BIT(2) | ||
67 | #define FNIC_IO_REQ_NULL BIT(3) | ||
68 | #define FNIC_IO_ABTS_PENDING BIT(4) | ||
69 | #define FNIC_IO_ABORTED BIT(5) | ||
70 | #define FNIC_IO_ABTS_ISSUED BIT(6) | ||
71 | #define FNIC_IO_TERM_ISSUED BIT(7) | ||
72 | #define FNIC_IO_INTERNAL_TERM_ISSUED BIT(8) | ||
73 | #define FNIC_IO_ABT_TERM_DONE BIT(9) | ||
74 | #define FNIC_IO_ABT_TERM_REQ_NULL BIT(10) | ||
75 | #define FNIC_IO_ABT_TERM_TIMED_OUT BIT(11) | ||
76 | #define FNIC_DEVICE_RESET BIT(12) /* Device reset request */ | ||
77 | #define FNIC_DEV_RST_ISSUED BIT(13) | ||
78 | #define FNIC_DEV_RST_TIMED_OUT BIT(14) | ||
79 | #define FNIC_DEV_RST_ABTS_ISSUED BIT(15) | ||
80 | #define FNIC_DEV_RST_TERM_ISSUED BIT(16) | ||
81 | #define FNIC_DEV_RST_DONE BIT(17) | ||
82 | #define FNIC_DEV_RST_REQ_NULL BIT(18) | ||
83 | #define FNIC_DEV_RST_ABTS_DONE BIT(19) | ||
84 | #define FNIC_DEV_RST_TERM_DONE BIT(20) | ||
85 | #define FNIC_DEV_RST_ABTS_PENDING BIT(21) | ||
86 | |||
87 | /* | ||
59 | * Usage of the scsi_cmnd scratchpad. | 88 | * Usage of the scsi_cmnd scratchpad. |
60 | * These fields are locked by the hashed io_req_lock. | 89 | * These fields are locked by the hashed io_req_lock. |
61 | */ | 90 | */ |
@@ -64,6 +93,7 @@ | |||
64 | #define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) | 93 | #define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) |
65 | #define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) | 94 | #define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) |
66 | #define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) | 95 | #define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) |
96 | #define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status) | ||
67 | 97 | ||
68 | #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ | 98 | #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ |
69 | 99 | ||
@@ -71,9 +101,28 @@ | |||
71 | #define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */ | 101 | #define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */ |
72 | #define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */ | 102 | #define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */ |
73 | #define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */ | 103 | #define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */ |
104 | #define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */ | ||
74 | 105 | ||
75 | #define FNIC_MAX_FCP_TARGET 256 | 106 | #define FNIC_MAX_FCP_TARGET 256 |
76 | 107 | ||
108 | /** | ||
110 | * state_flags to identify host state along with fnic's state | ||
110 | **/ | ||
111 | #define __FNIC_FLAGS_FWRESET BIT(0) /* fwreset in progress */ | ||
112 | #define __FNIC_FLAGS_BLOCK_IO BIT(1) /* IOs are blocked */ | ||
113 | |||
114 | #define FNIC_FLAGS_NONE (0) | ||
115 | #define FNIC_FLAGS_FWRESET (__FNIC_FLAGS_FWRESET | \ | ||
116 | __FNIC_FLAGS_BLOCK_IO) | ||
117 | |||
118 | #define FNIC_FLAGS_IO_BLOCKED (__FNIC_FLAGS_BLOCK_IO) | ||
119 | |||
120 | #define fnic_set_state_flags(fnicp, st_flags) \ | ||
121 | __fnic_set_state_flags(fnicp, st_flags, 0) | ||
122 | |||
123 | #define fnic_clear_state_flags(fnicp, st_flags) \ | ||
124 | __fnic_set_state_flags(fnicp, st_flags, 1) | ||
125 | |||
77 | extern unsigned int fnic_log_level; | 126 | extern unsigned int fnic_log_level; |
78 | 127 | ||
79 | #define FNIC_MAIN_LOGGING 0x01 | 128 | #define FNIC_MAIN_LOGGING 0x01 |
@@ -170,6 +219,9 @@ struct fnic { | |||
170 | 219 | ||
171 | struct completion *remove_wait; /* device remove thread blocks */ | 220 | struct completion *remove_wait; /* device remove thread blocks */ |
172 | 221 | ||
222 | atomic_t in_flight; /* io counter */ | ||
223 | u32 _reserved; /* fill hole */ | ||
224 | unsigned long state_flags; /* protected by host lock */ | ||
173 | enum fnic_state state; | 225 | enum fnic_state state; |
174 | spinlock_t fnic_lock; | 226 | spinlock_t fnic_lock; |
175 | 227 | ||
@@ -267,4 +319,12 @@ const char *fnic_state_to_str(unsigned int state); | |||
267 | void fnic_log_q_error(struct fnic *fnic); | 319 | void fnic_log_q_error(struct fnic *fnic); |
268 | void fnic_handle_link_event(struct fnic *fnic); | 320 | void fnic_handle_link_event(struct fnic *fnic); |
269 | 321 | ||
322 | int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); | ||
323 | |||
324 | static inline int | ||
325 | fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) | ||
326 | { | ||
327 | return ((fnic->state_flags & st_flags) == st_flags); | ||
328 | } | ||
329 | void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); | ||
270 | #endif /* _FNIC_H_ */ | 330 | #endif /* _FNIC_H_ */ |
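The new state_flags word and its helpers let the SCSI I/O path notice a firmware reset quickly: fnic_set_state_flags()/fnic_clear_state_flags() both funnel into __fnic_set_state_flags() (defined in fnic_scsi.c below), while fnic_chk_state_flags_locked() tests the bits with the caller already holding the host lock. A sketch of the intended bracketing around a firmware reset, mirroring the fnic_scsi.c hunks later in this patch:

#include "fnic.h"

/* Sketch of the fw-reset window as used by fnic_fw_reset_handler() below. */
static void example_fw_reset_window(struct fnic *fnic)
{
	/* block the I/O path; queuecommand sees FNIC_FLAGS_IO_BLOCKED */
	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

	/* ... purge queues, wait for in_flight to drain, issue reset ... */

	/* re-open the I/O path once the reset completion arrives */
	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
}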
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c new file mode 100644 index 000000000000..adc1f7f471f5 --- /dev/null +++ b/drivers/scsi/fnic/fnic_debugfs.c | |||
@@ -0,0 +1,314 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Cisco Systems, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you may redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; version 2 of the License. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
10 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
11 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
12 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
14 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
15 | * SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/debugfs.h> | ||
21 | #include "fnic.h" | ||
22 | |||
23 | static struct dentry *fnic_trace_debugfs_root; | ||
24 | static struct dentry *fnic_trace_debugfs_file; | ||
25 | static struct dentry *fnic_trace_enable; | ||
26 | |||
27 | /* | ||
28 | * fnic_trace_ctrl_open - Open the trace_enable file | ||
29 | * @inode: The inode pointer. | ||
30 | * @file: The file pointer to attach the trace enable/disable flag. | ||
31 | * | ||
32 | * Description: | ||
33 | * This routine opens a debugfs file trace_enable. | ||
34 | * | ||
35 | * Returns: | ||
36 | * This function returns zero if successful. | ||
37 | */ | ||
38 | static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp) | ||
39 | { | ||
40 | filp->private_data = inode->i_private; | ||
41 | return 0; | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * fnic_trace_ctrl_read - Read a trace_enable debugfs file | ||
46 | * @filp: The file pointer to read from. | ||
47 | * @ubuf: The buffer to copy the data to. | ||
48 | * @cnt: The number of bytes to read. | ||
49 | * @ppos: The position in the file to start reading from. | ||
50 | * | ||
51 | * Description: | ||
52 | * This routine reads value of variable fnic_tracing_enabled | ||
53 | * and stores into local @buf. It will start reading file at @ppos and | ||
54 | * copy up to @cnt of data to @ubuf from @buf. | ||
55 | * | ||
56 | * Returns: | ||
57 | * This function returns the amount of data that was read. | ||
58 | */ | ||
59 | static ssize_t fnic_trace_ctrl_read(struct file *filp, | ||
60 | char __user *ubuf, | ||
61 | size_t cnt, loff_t *ppos) | ||
62 | { | ||
63 | char buf[64]; | ||
64 | int len; | ||
65 | len = sprintf(buf, "%u\n", fnic_tracing_enabled); | ||
66 | |||
67 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * fnic_trace_ctrl_write - Write to trace_enable debugfs file | ||
72 | * @filp: The file pointer to write from. | ||
73 | * @ubuf: The buffer to copy the data from. | ||
74 | * @cnt: The number of bytes to write. | ||
75 | * @ppos: The position in the file to start writing to. | ||
76 | * | ||
77 | * Description: | ||
78 | * This routine writes data from user buffer @ubuf to buffer @buf and | ||
79 | * sets fnic_tracing_enabled value as per user input. | ||
80 | * | ||
81 | * Returns: | ||
82 | * This function returns the amount of data that was written. | ||
83 | */ | ||
84 | static ssize_t fnic_trace_ctrl_write(struct file *filp, | ||
85 | const char __user *ubuf, | ||
86 | size_t cnt, loff_t *ppos) | ||
87 | { | ||
88 | char buf[64]; | ||
89 | unsigned long val; | ||
90 | int ret; | ||
91 | |||
92 | if (cnt >= sizeof(buf)) | ||
93 | return -EINVAL; | ||
94 | |||
95 | if (copy_from_user(&buf, ubuf, cnt)) | ||
96 | return -EFAULT; | ||
97 | |||
98 | buf[cnt] = 0; | ||
99 | |||
100 | ret = kstrtoul(buf, 10, &val); | ||
101 | if (ret < 0) | ||
102 | return ret; | ||
103 | |||
104 | fnic_tracing_enabled = val; | ||
105 | (*ppos)++; | ||
106 | |||
107 | return cnt; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * fnic_trace_debugfs_open - Open the fnic trace log | ||
112 | * @inode: The inode pointer | ||
113 | * @file: The file pointer to attach the log output | ||
114 | * | ||
115 | * Description: | ||
116 | * This routine is the entry point for the debugfs open file operation. | ||
117 | * It allocates the necessary buffer for the log, fills the buffer from | ||
118 | * the in-memory log and then returns a pointer to that log in | ||
119 | * the private_data field in @file. | ||
120 | * | ||
121 | * Returns: | ||
122 | * This function returns zero if successful. On error it will return | ||
123 | * a negative error value. | ||
124 | */ | ||
125 | static int fnic_trace_debugfs_open(struct inode *inode, | ||
126 | struct file *file) | ||
127 | { | ||
128 | fnic_dbgfs_t *fnic_dbg_prt; | ||
129 | fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL); | ||
130 | if (!fnic_dbg_prt) | ||
131 | return -ENOMEM; | ||
132 | |||
133 | fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE))); | ||
134 | if (!fnic_dbg_prt->buffer) { | ||
135 | kfree(fnic_dbg_prt); | ||
136 | return -ENOMEM; | ||
137 | } | ||
138 | memset((void *)fnic_dbg_prt->buffer, 0, | ||
139 | (3*(trace_max_pages * PAGE_SIZE))); | ||
140 | fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); | ||
141 | file->private_data = fnic_dbg_prt; | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * fnic_trace_debugfs_lseek - Seek through a debugfs file | ||
147 | * @file: The file pointer to seek through. | ||
148 | * @offset: The offset to seek to or the amount to seek by. | ||
149 | * @howto: Indicates how to seek. | ||
150 | * | ||
151 | * Description: | ||
152 | * This routine is the entry point for the debugfs lseek file operation. | ||
153 | * The @howto parameter indicates whether @offset is the offset to directly | ||
154 | * seek to, or if it is a value to seek forward or reverse by. This function | ||
155 | * figures out what the new offset of the debugfs file will be and assigns | ||
156 | * that value to the f_pos field of @file. | ||
157 | * | ||
158 | * Returns: | ||
159 | * This function returns the new offset if successful and returns a negative | ||
160 | * error if unable to process the seek. | ||
161 | */ | ||
162 | static loff_t fnic_trace_debugfs_lseek(struct file *file, | ||
163 | loff_t offset, | ||
164 | int howto) | ||
165 | { | ||
166 | fnic_dbgfs_t *fnic_dbg_prt = file->private_data; | ||
167 | loff_t pos = -1; | ||
168 | |||
169 | switch (howto) { | ||
170 | case 0: | ||
171 | pos = offset; | ||
172 | break; | ||
173 | case 1: | ||
174 | pos = file->f_pos + offset; | ||
175 | break; | ||
176 | case 2: | ||
177 | pos = fnic_dbg_prt->buffer_len - offset; | ||
178 | } | ||
179 | return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ? | ||
180 | -EINVAL : (file->f_pos = pos); | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * fnic_trace_debugfs_read - Read a debugfs file | ||
185 | * @file: The file pointer to read from. | ||
186 | * @ubuf: The buffer to copy the data to. | ||
187 | * @nbytes: The number of bytes to read. | ||
188 | * @pos: The position in the file to start reading from. | ||
189 | * | ||
190 | * Description: | ||
191 | * This routine reads data from the buffer indicated in the private_data | ||
192 | * field of @file. It will start reading at @pos and copy up to @nbytes of | ||
193 | * data to @ubuf. | ||
194 | * | ||
195 | * Returns: | ||
196 | * This function returns the amount of data that was read (this could be | ||
197 | * less than @nbytes if the end of the file was reached). | ||
198 | */ | ||
199 | static ssize_t fnic_trace_debugfs_read(struct file *file, | ||
200 | char __user *ubuf, | ||
201 | size_t nbytes, | ||
202 | loff_t *pos) | ||
203 | { | ||
204 | fnic_dbgfs_t *fnic_dbg_prt = file->private_data; | ||
205 | int rc = 0; | ||
206 | rc = simple_read_from_buffer(ubuf, nbytes, pos, | ||
207 | fnic_dbg_prt->buffer, | ||
208 | fnic_dbg_prt->buffer_len); | ||
209 | return rc; | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * fnic_trace_debugfs_release - Release the buffer used to store | ||
214 | * debugfs file data | ||
215 | * @inode: The inode pointer | ||
216 | * @file: The file pointer that contains the buffer to release | ||
217 | * | ||
218 | * Description: | ||
219 | * This routine frees the buffer that was allocated when the debugfs | ||
220 | * file was opened. | ||
221 | * | ||
222 | * Returns: | ||
223 | * This function returns zero. | ||
224 | */ | ||
225 | static int fnic_trace_debugfs_release(struct inode *inode, | ||
226 | struct file *file) | ||
227 | { | ||
228 | fnic_dbgfs_t *fnic_dbg_prt = file->private_data; | ||
229 | |||
230 | vfree(fnic_dbg_prt->buffer); | ||
231 | kfree(fnic_dbg_prt); | ||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | static const struct file_operations fnic_trace_ctrl_fops = { | ||
236 | .owner = THIS_MODULE, | ||
237 | .open = fnic_trace_ctrl_open, | ||
238 | .read = fnic_trace_ctrl_read, | ||
239 | .write = fnic_trace_ctrl_write, | ||
240 | }; | ||
241 | |||
242 | static const struct file_operations fnic_trace_debugfs_fops = { | ||
243 | .owner = THIS_MODULE, | ||
244 | .open = fnic_trace_debugfs_open, | ||
245 | .llseek = fnic_trace_debugfs_lseek, | ||
246 | .read = fnic_trace_debugfs_read, | ||
247 | .release = fnic_trace_debugfs_release, | ||
248 | }; | ||
249 | |||
250 | /* | ||
251 | * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging | ||
252 | * | ||
253 | * Description: | ||
254 | * When Debugfs is configured this routine sets up the fnic debugfs | ||
255 | * file system. If not already created, this routine will create the | ||
256 | * fnic directory. It will create file trace to log fnic trace buffer | ||
257 | * output into debugfs and it will also create file trace_enable to | ||
258 | * control enable/disable of trace logging into trace buffer. | ||
259 | */ | ||
260 | int fnic_trace_debugfs_init(void) | ||
261 | { | ||
262 | int rc = -1; | ||
263 | fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); | ||
264 | if (!fnic_trace_debugfs_root) { | ||
265 | printk(KERN_DEBUG "Cannot create debugfs root\n"); | ||
266 | return rc; | ||
267 | } | ||
268 | fnic_trace_enable = debugfs_create_file("tracing_enable", | ||
269 | S_IFREG|S_IRUGO|S_IWUSR, | ||
270 | fnic_trace_debugfs_root, | ||
271 | NULL, &fnic_trace_ctrl_fops); | ||
272 | |||
273 | if (!fnic_trace_enable) { | ||
274 | printk(KERN_DEBUG "Cannot create trace_enable file" | ||
275 | " under debugfs"); | ||
276 | return rc; | ||
277 | } | ||
278 | |||
279 | fnic_trace_debugfs_file = debugfs_create_file("trace", | ||
280 | S_IFREG|S_IRUGO|S_IWUSR, | ||
281 | fnic_trace_debugfs_root, | ||
282 | NULL, | ||
283 | &fnic_trace_debugfs_fops); | ||
284 | |||
285 | if (!fnic_trace_debugfs_file) { | ||
286 | printk(KERN_DEBUG "Cannot create trace file under debugfs"); | ||
287 | return rc; | ||
288 | } | ||
289 | rc = 0; | ||
290 | return rc; | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure | ||
295 | * | ||
296 | * Description: | ||
297 | * When Debugfs is configured this routine removes debugfs file system | ||
298 | * elements that are specific to fnic trace logging. | ||
299 | */ | ||
300 | void fnic_trace_debugfs_terminate(void) | ||
301 | { | ||
302 | if (fnic_trace_debugfs_file) { | ||
303 | debugfs_remove(fnic_trace_debugfs_file); | ||
304 | fnic_trace_debugfs_file = NULL; | ||
305 | } | ||
306 | if (fnic_trace_enable) { | ||
307 | debugfs_remove(fnic_trace_enable); | ||
308 | fnic_trace_enable = NULL; | ||
309 | } | ||
310 | if (fnic_trace_debugfs_root) { | ||
311 | debugfs_remove(fnic_trace_debugfs_root); | ||
312 | fnic_trace_debugfs_root = NULL; | ||
313 | } | ||
314 | } | ||
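The files created above land under the fnic directory of the debugfs mount. A user-space sketch that enables tracing and dumps the trace buffer; the /sys/kernel/debug mount point is an assumption, while the file names tracing_enable and trace come from fnic_trace_debugfs_init() above.

#include <stdio.h>

/* User-space sketch: enable fnic tracing, then dump the trace buffer. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/fnic/tracing_enable", "w");

	if (f) {
		fputs("1\n", f);
		fclose(f);
	}

	f = fopen("/sys/kernel/debug/fnic/trace", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}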
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h index f0b896988cd5..c35b8f1889ea 100644 --- a/drivers/scsi/fnic/fnic_io.h +++ b/drivers/scsi/fnic/fnic_io.h | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <scsi/fc/fc_fcp.h> | 21 | #include <scsi/fc/fc_fcp.h> |
22 | 22 | ||
23 | #define FNIC_DFLT_SG_DESC_CNT 32 | 23 | #define FNIC_DFLT_SG_DESC_CNT 32 |
24 | #define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */ | 24 | #define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */ |
25 | #define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */ | 25 | #define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */ |
26 | 26 | ||
27 | struct host_sg_desc { | 27 | struct host_sg_desc { |
@@ -45,7 +45,8 @@ enum fnic_sgl_list_type { | |||
45 | }; | 45 | }; |
46 | 46 | ||
47 | enum fnic_ioreq_state { | 47 | enum fnic_ioreq_state { |
48 | FNIC_IOREQ_CMD_PENDING = 0, | 48 | FNIC_IOREQ_NOT_INITED = 0, |
49 | FNIC_IOREQ_CMD_PENDING, | ||
49 | FNIC_IOREQ_ABTS_PENDING, | 50 | FNIC_IOREQ_ABTS_PENDING, |
50 | FNIC_IOREQ_ABTS_COMPLETE, | 51 | FNIC_IOREQ_ABTS_COMPLETE, |
51 | FNIC_IOREQ_CMD_COMPLETE, | 52 | FNIC_IOREQ_CMD_COMPLETE, |
@@ -60,6 +61,7 @@ struct fnic_io_req { | |||
60 | u8 sgl_type; /* device DMA descriptor list type */ | 61 | u8 sgl_type; /* device DMA descriptor list type */ |
61 | u8 io_completed:1; /* set to 1 when fw completes IO */ | 62 | u8 io_completed:1; /* set to 1 when fw completes IO */ |
62 | u32 port_id; /* remote port DID */ | 63 | u32 port_id; /* remote port DID */ |
64 | unsigned long start_time; /* in jiffies */ | ||
63 | struct completion *abts_done; /* completion for abts */ | 65 | struct completion *abts_done; /* completion for abts */ |
64 | struct completion *dr_done; /* completion for device reset */ | 66 | struct completion *dr_done; /* completion for device reset */ |
65 | }; | 67 | }; |
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index fbf3ac6e0c55..d601ac543c52 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c | |||
@@ -68,6 +68,10 @@ unsigned int fnic_log_level; | |||
68 | module_param(fnic_log_level, int, S_IRUGO|S_IWUSR); | 68 | module_param(fnic_log_level, int, S_IRUGO|S_IWUSR); |
69 | MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); | 69 | MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); |
70 | 70 | ||
71 | unsigned int fnic_trace_max_pages = 16; | ||
72 | module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR); | ||
73 | MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " | ||
74 | "for fnic trace buffer"); | ||
71 | 75 | ||
72 | static struct libfc_function_template fnic_transport_template = { | 76 | static struct libfc_function_template fnic_transport_template = { |
73 | .frame_send = fnic_send, | 77 | .frame_send = fnic_send, |
@@ -624,6 +628,9 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
624 | } | 628 | } |
625 | fnic->state = FNIC_IN_FC_MODE; | 629 | fnic->state = FNIC_IN_FC_MODE; |
626 | 630 | ||
631 | atomic_set(&fnic->in_flight, 0); | ||
632 | fnic->state_flags = FNIC_FLAGS_NONE; | ||
633 | |||
627 | /* Enable hardware stripping of vlan header on ingress */ | 634 | /* Enable hardware stripping of vlan header on ingress */ |
628 | fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1); | 635 | fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1); |
629 | 636 | ||
@@ -858,6 +865,14 @@ static int __init fnic_init_module(void) | |||
858 | 865 | ||
859 | printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); | 866 | printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); |
860 | 867 | ||
868 | /* Allocate memory for trace buffer */ | ||
869 | err = fnic_trace_buf_init(); | ||
870 | if (err < 0) { | ||
871 | printk(KERN_ERR PFX "Trace buffer initialization Failed " | ||
872 | "Fnic Tracing utility is disabled\n"); | ||
873 | fnic_trace_free(); | ||
874 | } | ||
875 | |||
861 | /* Create a cache for allocation of default size sgls */ | 876 | /* Create a cache for allocation of default size sgls */ |
862 | len = sizeof(struct fnic_dflt_sgl_list); | 877 | len = sizeof(struct fnic_dflt_sgl_list); |
863 | fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create | 878 | fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create |
@@ -928,6 +943,7 @@ err_create_fnic_ioreq_slab: | |||
928 | err_create_fnic_sgl_slab_max: | 943 | err_create_fnic_sgl_slab_max: |
929 | kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); | 944 | kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); |
930 | err_create_fnic_sgl_slab_dflt: | 945 | err_create_fnic_sgl_slab_dflt: |
946 | fnic_trace_free(); | ||
931 | return err; | 947 | return err; |
932 | } | 948 | } |
933 | 949 | ||
@@ -939,6 +955,7 @@ static void __exit fnic_cleanup_module(void) | |||
939 | kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); | 955 | kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); |
940 | kmem_cache_destroy(fnic_io_req_cache); | 956 | kmem_cache_destroy(fnic_io_req_cache); |
941 | fc_release_transport(fnic_fc_transport); | 957 | fc_release_transport(fnic_fc_transport); |
958 | fnic_trace_free(); | ||
942 | } | 959 | } |
943 | 960 | ||
944 | module_init(fnic_init_module); | 961 | module_init(fnic_init_module); |
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index c40ce52ed7c6..be99e7549d89 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
@@ -47,6 +47,7 @@ const char *fnic_state_str[] = { | |||
47 | }; | 47 | }; |
48 | 48 | ||
49 | static const char *fnic_ioreq_state_str[] = { | 49 | static const char *fnic_ioreq_state_str[] = { |
50 | [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED", | ||
50 | [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING", | 51 | [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING", |
51 | [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING", | 52 | [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING", |
52 | [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE", | 53 | [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE", |
@@ -165,6 +166,33 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) | |||
165 | } | 166 | } |
166 | 167 | ||
167 | 168 | ||
169 | /** | ||
170 | * __fnic_set_state_flags | ||
171 | * Sets/Clears bits in fnic's state_flags | ||
172 | **/ | ||
173 | void | ||
174 | __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags, | ||
175 | unsigned long clearbits) | ||
176 | { | ||
177 | struct Scsi_Host *host = fnic->lport->host; | ||
178 | int sh_locked = spin_is_locked(host->host_lock); | ||
179 | unsigned long flags = 0; | ||
180 | |||
181 | if (!sh_locked) | ||
182 | spin_lock_irqsave(host->host_lock, flags); | ||
183 | |||
184 | if (clearbits) | ||
185 | fnic->state_flags &= ~st_flags; | ||
186 | else | ||
187 | fnic->state_flags |= st_flags; | ||
188 | |||
189 | if (!sh_locked) | ||
190 | spin_unlock_irqrestore(host->host_lock, flags); | ||
191 | |||
192 | return; | ||
193 | } | ||
194 | |||
195 | |||
168 | /* | 196 | /* |
169 | * fnic_fw_reset_handler | 197 | * fnic_fw_reset_handler |
170 | * Routine to send reset msg to fw | 198 | * Routine to send reset msg to fw |
@@ -175,9 +203,16 @@ int fnic_fw_reset_handler(struct fnic *fnic) | |||
175 | int ret = 0; | 203 | int ret = 0; |
176 | unsigned long flags; | 204 | unsigned long flags; |
177 | 205 | ||
206 | /* indicate fwreset to io path */ | ||
207 | fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET); | ||
208 | |||
178 | skb_queue_purge(&fnic->frame_queue); | 209 | skb_queue_purge(&fnic->frame_queue); |
179 | skb_queue_purge(&fnic->tx_queue); | 210 | skb_queue_purge(&fnic->tx_queue); |
180 | 211 | ||
212 | /* wait for io cmpl */ | ||
213 | while (atomic_read(&fnic->in_flight)) | ||
214 | schedule_timeout(msecs_to_jiffies(1)); | ||
215 | |||
181 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); | 216 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); |
182 | 217 | ||
183 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) | 218 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) |
@@ -193,9 +228,12 @@ int fnic_fw_reset_handler(struct fnic *fnic) | |||
193 | if (!ret) | 228 | if (!ret) |
194 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 229 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
195 | "Issued fw reset\n"); | 230 | "Issued fw reset\n"); |
196 | else | 231 | else { |
232 | fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); | ||
197 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 233 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
198 | "Failed to issue fw reset\n"); | 234 | "Failed to issue fw reset\n"); |
235 | } | ||
236 | |||
199 | return ret; | 237 | return ret; |
200 | } | 238 | } |
201 | 239 | ||
@@ -312,6 +350,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, | |||
312 | 350 | ||
313 | if (unlikely(!vnic_wq_copy_desc_avail(wq))) { | 351 | if (unlikely(!vnic_wq_copy_desc_avail(wq))) { |
314 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); | 352 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); |
353 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, | ||
354 | "fnic_queue_wq_copy_desc failure - no descriptors\n"); | ||
315 | return SCSI_MLQUEUE_HOST_BUSY; | 355 | return SCSI_MLQUEUE_HOST_BUSY; |
316 | } | 356 | } |
317 | 357 | ||
@@ -351,16 +391,20 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, | |||
351 | */ | 391 | */ |
352 | static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | 392 | static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) |
353 | { | 393 | { |
354 | struct fc_lport *lp; | 394 | struct fc_lport *lp = shost_priv(sc->device->host); |
355 | struct fc_rport *rport; | 395 | struct fc_rport *rport; |
356 | struct fnic_io_req *io_req; | 396 | struct fnic_io_req *io_req = NULL; |
357 | struct fnic *fnic; | 397 | struct fnic *fnic = lport_priv(lp); |
358 | struct vnic_wq_copy *wq; | 398 | struct vnic_wq_copy *wq; |
359 | int ret; | 399 | int ret; |
360 | int sg_count; | 400 | u64 cmd_trace; |
401 | int sg_count = 0; | ||
361 | unsigned long flags; | 402 | unsigned long flags; |
362 | unsigned long ptr; | 403 | unsigned long ptr; |
363 | 404 | ||
405 | if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) | ||
406 | return SCSI_MLQUEUE_HOST_BUSY; | ||
407 | |||
364 | rport = starget_to_rport(scsi_target(sc->device)); | 408 | rport = starget_to_rport(scsi_target(sc->device)); |
365 | ret = fc_remote_port_chkready(rport); | 409 | ret = fc_remote_port_chkready(rport); |
366 | if (ret) { | 410 | if (ret) { |
@@ -369,20 +413,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ | |||
369 | return 0; | 413 | return 0; |
370 | } | 414 | } |
371 | 415 | ||
372 | lp = shost_priv(sc->device->host); | ||
373 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) | 416 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) |
374 | return SCSI_MLQUEUE_HOST_BUSY; | 417 | return SCSI_MLQUEUE_HOST_BUSY; |
375 | 418 | ||
419 | atomic_inc(&fnic->in_flight); | ||
420 | |||
376 | /* | 421 | /* |
377 | * Release host lock, use driver resource specific locks from here. | 422 | * Release host lock, use driver resource specific locks from here. |
378 | * Don't re-enable interrupts in case they were disabled prior to the | 423 | * Don't re-enable interrupts in case they were disabled prior to the |
379 | * caller disabling them. | 424 | * caller disabling them. |
380 | */ | 425 | */ |
381 | spin_unlock(lp->host->host_lock); | 426 | spin_unlock(lp->host->host_lock); |
427 | CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED; | ||
428 | CMD_FLAGS(sc) = FNIC_NO_FLAGS; | ||
382 | 429 | ||
383 | /* Get a new io_req for this SCSI IO */ | 430 | /* Get a new io_req for this SCSI IO */ |
384 | fnic = lport_priv(lp); | ||
385 | |||
386 | io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); | 431 | io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); |
387 | if (!io_req) { | 432 | if (!io_req) { |
388 | ret = SCSI_MLQUEUE_HOST_BUSY; | 433 | ret = SCSI_MLQUEUE_HOST_BUSY; |
@@ -393,6 +438,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ | |||
393 | /* Map the data buffer */ | 438 | /* Map the data buffer */ |
394 | sg_count = scsi_dma_map(sc); | 439 | sg_count = scsi_dma_map(sc); |
395 | if (sg_count < 0) { | 440 | if (sg_count < 0) { |
441 | FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, | ||
442 | sc->request->tag, sc, 0, sc->cmnd[0], | ||
443 | sg_count, CMD_STATE(sc)); | ||
396 | mempool_free(io_req, fnic->io_req_pool); | 444 | mempool_free(io_req, fnic->io_req_pool); |
397 | goto out; | 445 | goto out; |
398 | } | 446 | } |
@@ -427,8 +475,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ | |||
427 | 475 | ||
428 | /* initialize rest of io_req */ | 476 | /* initialize rest of io_req */ |
429 | io_req->port_id = rport->port_id; | 477 | io_req->port_id = rport->port_id; |
478 | io_req->start_time = jiffies; | ||
430 | CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; | 479 | CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; |
431 | CMD_SP(sc) = (char *)io_req; | 480 | CMD_SP(sc) = (char *)io_req; |
481 | CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED; | ||
432 | sc->scsi_done = done; | 482 | sc->scsi_done = done; |
433 | 483 | ||
434 | /* create copy wq desc and enqueue it */ | 484 | /* create copy wq desc and enqueue it */ |
@@ -440,7 +490,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ | |||
440 | * refetch the pointer under the lock. | 490 | * refetch the pointer under the lock. |
441 | */ | 491 | */ |
442 | spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc); | 492 | spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc); |
443 | 493 | FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, | |
494 | sc->request->tag, sc, 0, 0, 0, | ||
495 | (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); | ||
444 | spin_lock_irqsave(io_lock, flags); | 496 | spin_lock_irqsave(io_lock, flags); |
445 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 497 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
446 | CMD_SP(sc) = NULL; | 498 | CMD_SP(sc) = NULL; |
@@ -450,8 +502,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ | |||
450 | fnic_release_ioreq_buf(fnic, io_req, sc); | 502 | fnic_release_ioreq_buf(fnic, io_req, sc); |
451 | mempool_free(io_req, fnic->io_req_pool); | 503 | mempool_free(io_req, fnic->io_req_pool); |
452 | } | 504 | } |
505 | } else { | ||
506 | /* REVISIT: Use per IO lock in the final code */ | ||
507 | CMD_FLAGS(sc) |= FNIC_IO_ISSUED; | ||
453 | } | 508 | } |
454 | out: | 509 | out: |
510 | cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | | ||
511 | (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | | ||
512 | (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | | ||
513 | sc->cmnd[5]); | ||
514 | |||
515 | FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, | ||
516 | sc->request->tag, sc, io_req, | ||
517 | sg_count, cmd_trace, | ||
518 | (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); | ||
519 | atomic_dec(&fnic->in_flight); | ||
455 | /* acquire host lock before returning to SCSI */ | 520 | /* acquire host lock before returning to SCSI */ |
456 | spin_lock(lp->host->host_lock); | 521 | spin_lock(lp->host->host_lock); |
457 | return ret; | 522 | return ret; |
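The FNIC_TRACE calls added to the queuecommand path pack several values into single u64 words: the CDB bytes into one word and, at most call sites, CMD_FLAGS in the upper 32 bits with CMD_STATE in the lower 32. A sketch of unpacking that flags/state word when post-processing a trace dump; the layout is taken from the ((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc) packing used above and in the completion handler below, and the function name is illustrative.

#include <stdint.h>

/* Sketch: decode the packed flags/state word found in a trace dump. */
void example_unpack_trace_word(uint64_t word, uint32_t *flags, uint32_t *state)
{
	*flags = (uint32_t)(word >> 32);	/* CMD_FLAGS(sc) */
	*state = (uint32_t)word;		/* CMD_STATE(sc) */
}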
@@ -529,6 +594,8 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, | |||
529 | fnic_flush_tx(fnic); | 594 | fnic_flush_tx(fnic); |
530 | 595 | ||
531 | reset_cmpl_handler_end: | 596 | reset_cmpl_handler_end: |
597 | fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); | ||
598 | |||
532 | return ret; | 599 | return ret; |
533 | } | 600 | } |
534 | 601 | ||
@@ -622,6 +689,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic, | |||
622 | struct vnic_wq_copy *wq; | 689 | struct vnic_wq_copy *wq; |
623 | u16 request_out = desc->u.ack.request_out; | 690 | u16 request_out = desc->u.ack.request_out; |
624 | unsigned long flags; | 691 | unsigned long flags; |
692 | u64 *ox_id_tag = (u64 *)(void *)desc; | ||
625 | 693 | ||
626 | /* mark the ack state */ | 694 | /* mark the ack state */ |
627 | wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; | 695 | wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; |
@@ -632,6 +700,9 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic, | |||
632 | fnic->fw_ack_recd[0] = 1; | 700 | fnic->fw_ack_recd[0] = 1; |
633 | } | 701 | } |
634 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); | 702 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); |
703 | FNIC_TRACE(fnic_fcpio_ack_handler, | ||
704 | fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], | ||
705 | ox_id_tag[4], ox_id_tag[5]); | ||
635 | } | 706 | } |
636 | 707 | ||
637 | /* | 708 | /* |
@@ -651,27 +722,53 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, | |||
651 | struct scsi_cmnd *sc; | 722 | struct scsi_cmnd *sc; |
652 | unsigned long flags; | 723 | unsigned long flags; |
653 | spinlock_t *io_lock; | 724 | spinlock_t *io_lock; |
725 | u64 cmd_trace; | ||
726 | unsigned long start_time; | ||
654 | 727 | ||
655 | /* Decode the cmpl description to get the io_req id */ | 728 | /* Decode the cmpl description to get the io_req id */ |
656 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); | 729 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); |
657 | fcpio_tag_id_dec(&tag, &id); | 730 | fcpio_tag_id_dec(&tag, &id); |
731 | icmnd_cmpl = &desc->u.icmnd_cmpl; | ||
658 | 732 | ||
659 | if (id >= FNIC_MAX_IO_REQ) | 733 | if (id >= FNIC_MAX_IO_REQ) { |
734 | shost_printk(KERN_ERR, fnic->lport->host, | ||
735 | "Tag out of range tag %x hdr status = %s\n", | ||
736 | id, fnic_fcpio_status_to_str(hdr_status)); | ||
660 | return; | 737 | return; |
738 | } | ||
661 | 739 | ||
662 | sc = scsi_host_find_tag(fnic->lport->host, id); | 740 | sc = scsi_host_find_tag(fnic->lport->host, id); |
663 | WARN_ON_ONCE(!sc); | 741 | WARN_ON_ONCE(!sc); |
664 | if (!sc) | 742 | if (!sc) { |
743 | shost_printk(KERN_ERR, fnic->lport->host, | ||
744 | "icmnd_cmpl sc is null - " | ||
745 | "hdr status = %s tag = 0x%x desc = 0x%p\n", | ||
746 | fnic_fcpio_status_to_str(hdr_status), id, desc); | ||
747 | FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler, | ||
748 | fnic->lport->host->host_no, id, | ||
749 | ((u64)icmnd_cmpl->_resvd0[1] << 16 | | ||
750 | (u64)icmnd_cmpl->_resvd0[0]), | ||
751 | ((u64)hdr_status << 16 | | ||
752 | (u64)icmnd_cmpl->scsi_status << 8 | | ||
753 | (u64)icmnd_cmpl->flags), desc, | ||
754 | (u64)icmnd_cmpl->residual, 0); | ||
665 | return; | 755 | return; |
756 | } | ||
666 | 757 | ||
667 | io_lock = fnic_io_lock_hash(fnic, sc); | 758 | io_lock = fnic_io_lock_hash(fnic, sc); |
668 | spin_lock_irqsave(io_lock, flags); | 759 | spin_lock_irqsave(io_lock, flags); |
669 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 760 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
670 | WARN_ON_ONCE(!io_req); | 761 | WARN_ON_ONCE(!io_req); |
671 | if (!io_req) { | 762 | if (!io_req) { |
763 | CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL; | ||
672 | spin_unlock_irqrestore(io_lock, flags); | 764 | spin_unlock_irqrestore(io_lock, flags); |
765 | shost_printk(KERN_ERR, fnic->lport->host, | ||
766 | "icmnd_cmpl io_req is null - " | ||
767 | "hdr status = %s tag = 0x%x sc 0x%p\n", | ||
768 | fnic_fcpio_status_to_str(hdr_status), id, sc); | ||
673 | return; | 769 | return; |
674 | } | 770 | } |
771 | start_time = io_req->start_time; | ||
675 | 772 | ||
676 | /* firmware completed the io */ | 773 | /* firmware completed the io */ |
677 | io_req->io_completed = 1; | 774 | io_req->io_completed = 1; |
@@ -682,6 +779,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, | |||
682 | */ | 779 | */ |
683 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | 780 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { |
684 | spin_unlock_irqrestore(io_lock, flags); | 781 | spin_unlock_irqrestore(io_lock, flags); |
782 | CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING; | ||
783 | switch (hdr_status) { | ||
784 | case FCPIO_SUCCESS: | ||
785 | CMD_FLAGS(sc) |= FNIC_IO_DONE; | ||
786 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, | ||
787 | "icmnd_cmpl ABTS pending hdr status = %s " | ||
788 | "sc 0x%p scsi_status %x residual %d\n", | ||
789 | fnic_fcpio_status_to_str(hdr_status), sc, | ||
790 | icmnd_cmpl->scsi_status, | ||
791 | icmnd_cmpl->residual); | ||
792 | break; | ||
793 | case FCPIO_ABORTED: | ||
794 | CMD_FLAGS(sc) |= FNIC_IO_ABORTED; | ||
795 | break; | ||
796 | default: | ||
797 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, | ||
798 | "icmnd_cmpl abts pending " | ||
799 | "hdr status = %s tag = 0x%x sc = 0x%p\n", | ||
800 | fnic_fcpio_status_to_str(hdr_status), | ||
801 | id, sc); | ||
802 | break; | ||
803 | } | ||
685 | return; | 804 | return; |
686 | } | 805 | } |
687 | 806 | ||
@@ -765,6 +884,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, | |||
765 | 884 | ||
766 | /* Break link with the SCSI command */ | 885 | /* Break link with the SCSI command */ |
767 | CMD_SP(sc) = NULL; | 886 | CMD_SP(sc) = NULL; |
887 | CMD_FLAGS(sc) |= FNIC_IO_DONE; | ||
768 | 888 | ||
769 | spin_unlock_irqrestore(io_lock, flags); | 889 | spin_unlock_irqrestore(io_lock, flags); |
770 | 890 | ||
@@ -772,6 +892,20 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, | |||
772 | 892 | ||
773 | mempool_free(io_req, fnic->io_req_pool); | 893 | mempool_free(io_req, fnic->io_req_pool); |
774 | 894 | ||
895 | cmd_trace = ((u64)hdr_status << 56) | | ||
896 | (u64)icmnd_cmpl->scsi_status << 48 | | ||
897 | (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 | | ||
898 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | | ||
899 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]; | ||
900 | |||
901 | FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler, | ||
902 | sc->device->host->host_no, id, sc, | ||
903 | ((u64)icmnd_cmpl->_resvd0[1] << 56 | | ||
904 | (u64)icmnd_cmpl->_resvd0[0] << 48 | | ||
905 | jiffies_to_msecs(jiffies - start_time)), | ||
906 | desc, cmd_trace, | ||
907 | (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); | ||
908 | |||
775 | if (sc->sc_data_direction == DMA_FROM_DEVICE) { | 909 | if (sc->sc_data_direction == DMA_FROM_DEVICE) { |
776 | fnic->lport->host_stats.fcp_input_requests++; | 910 | fnic->lport->host_stats.fcp_input_requests++; |
777 | fnic->fcp_input_bytes += xfer_len; | 911 | fnic->fcp_input_bytes += xfer_len; |
@@ -784,7 +918,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, | |||
784 | /* Call SCSI completion function to complete the IO */ | 918 | /* Call SCSI completion function to complete the IO */ |
785 | if (sc->scsi_done) | 919 | if (sc->scsi_done) |
786 | sc->scsi_done(sc); | 920 | sc->scsi_done(sc); |
787 | |||
788 | } | 921 | } |
789 | 922 | ||
790 | /* fnic_fcpio_itmf_cmpl_handler | 923 | /* fnic_fcpio_itmf_cmpl_handler |
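For reference, the 64-bit trace words built in this handler pack several fields into single values: the final FNIC_TRACE argument carries CMD_FLAGS(sc) in its upper 32 bits and CMD_STATE(sc) in its lower 32 bits, and cmd_trace folds the completion status together with selected CDB bytes. A minimal decode sketch for the cmd_trace word, assuming only the shifts shown in the hunk above (the helper name and the pr_debug output are illustrative, not part of the patch):

	/* Illustrative decode of the cmd_trace word from fnic_fcpio_icmnd_cmpl_handler(). */
	static void fnic_decode_cmd_trace(u64 w)
	{
		u8 hdr_status  = (w >> 56) & 0xff;	/* firmware completion status */
		u8 scsi_status = (w >> 48) & 0xff;	/* SCSI status byte */
		u8 cmpl_flags  = (w >> 40) & 0xff;	/* icmnd_cmpl->flags */
		u8 cdb0        = (w >> 32) & 0xff;	/* sc->cmnd[0], the opcode */
		u8 cdb2        = (w >> 24) & 0xff;	/* sc->cmnd[2..5] follow */
		u8 cdb3        = (w >> 16) & 0xff;
		u8 cdb4        = (w >> 8) & 0xff;
		u8 cdb5        = w & 0xff;

		pr_debug("hdr %02x scsi %02x flags %02x cdb %02x %02x%02x%02x%02x\n",
			 hdr_status, scsi_status, cmpl_flags,
			 cdb0, cdb2, cdb3, cdb4, cdb5);
	}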
@@ -801,28 +934,54 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, | |||
801 | struct fnic_io_req *io_req; | 934 | struct fnic_io_req *io_req; |
802 | unsigned long flags; | 935 | unsigned long flags; |
803 | spinlock_t *io_lock; | 936 | spinlock_t *io_lock; |
937 | unsigned long start_time; | ||
804 | 938 | ||
805 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); | 939 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); |
806 | fcpio_tag_id_dec(&tag, &id); | 940 | fcpio_tag_id_dec(&tag, &id); |
807 | 941 | ||
808 | if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) | 942 | if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) { |
943 | shost_printk(KERN_ERR, fnic->lport->host, | ||
944 | "Tag out of range tag %x hdr status = %s\n", | ||
945 | id, fnic_fcpio_status_to_str(hdr_status)); | ||
809 | return; | 946 | return; |
947 | } | ||
810 | 948 | ||
811 | sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); | 949 | sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); |
812 | WARN_ON_ONCE(!sc); | 950 | WARN_ON_ONCE(!sc); |
813 | if (!sc) | 951 | if (!sc) { |
952 | shost_printk(KERN_ERR, fnic->lport->host, | ||
953 | "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", | ||
954 | fnic_fcpio_status_to_str(hdr_status), id); | ||
814 | return; | 955 | return; |
815 | 956 | } | |
816 | io_lock = fnic_io_lock_hash(fnic, sc); | 957 | io_lock = fnic_io_lock_hash(fnic, sc); |
817 | spin_lock_irqsave(io_lock, flags); | 958 | spin_lock_irqsave(io_lock, flags); |
818 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 959 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
819 | WARN_ON_ONCE(!io_req); | 960 | WARN_ON_ONCE(!io_req); |
820 | if (!io_req) { | 961 | if (!io_req) { |
821 | spin_unlock_irqrestore(io_lock, flags); | 962 | spin_unlock_irqrestore(io_lock, flags); |
963 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; | ||
964 | shost_printk(KERN_ERR, fnic->lport->host, | ||
965 | "itmf_cmpl io_req is null - " | ||
966 | "hdr status = %s tag = 0x%x sc 0x%p\n", | ||
967 | fnic_fcpio_status_to_str(hdr_status), id, sc); | ||
822 | return; | 968 | return; |
823 | } | 969 | } |
970 | start_time = io_req->start_time; | ||
824 | 971 | ||
825 | if (id & FNIC_TAG_ABORT) { | 972 | if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) { |
973 | /* Abort and terminate completion of device reset req */ | ||
974 | /* REVISIT : Add asserts about various flags */ | ||
975 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
976 | "dev reset abts cmpl recd. id %x status %s\n", | ||
977 | id, fnic_fcpio_status_to_str(hdr_status)); | ||
978 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; | ||
979 | CMD_ABTS_STATUS(sc) = hdr_status; | ||
980 | CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; | ||
981 | if (io_req->abts_done) | ||
982 | complete(io_req->abts_done); | ||
983 | spin_unlock_irqrestore(io_lock, flags); | ||
984 | } else if (id & FNIC_TAG_ABORT) { | ||
826 | /* Completion of abort cmd */ | 985 | /* Completion of abort cmd */ |
827 | if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { | 986 | if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { |
828 | /* This is a late completion. Ignore it */ | 987 | /* This is a late completion. Ignore it */ |
@@ -832,6 +991,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, | |||
832 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; | 991 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; |
833 | CMD_ABTS_STATUS(sc) = hdr_status; | 992 | CMD_ABTS_STATUS(sc) = hdr_status; |
834 | 993 | ||
994 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; | ||
835 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 995 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
836 | "abts cmpl recd. id %d status %s\n", | 996 | "abts cmpl recd. id %d status %s\n", |
837 | (int)(id & FNIC_TAG_MASK), | 997 | (int)(id & FNIC_TAG_MASK), |
@@ -855,14 +1015,58 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, | |||
855 | 1015 | ||
856 | fnic_release_ioreq_buf(fnic, io_req, sc); | 1016 | fnic_release_ioreq_buf(fnic, io_req, sc); |
857 | mempool_free(io_req, fnic->io_req_pool); | 1017 | mempool_free(io_req, fnic->io_req_pool); |
858 | if (sc->scsi_done) | 1018 | if (sc->scsi_done) { |
1019 | FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, | ||
1020 | sc->device->host->host_no, id, | ||
1021 | sc, | ||
1022 | jiffies_to_msecs(jiffies - start_time), | ||
1023 | desc, | ||
1024 | (((u64)hdr_status << 40) | | ||
1025 | (u64)sc->cmnd[0] << 32 | | ||
1026 | (u64)sc->cmnd[2] << 24 | | ||
1027 | (u64)sc->cmnd[3] << 16 | | ||
1028 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), | ||
1029 | (((u64)CMD_FLAGS(sc) << 32) | | ||
1030 | CMD_STATE(sc))); | ||
859 | sc->scsi_done(sc); | 1031 | sc->scsi_done(sc); |
1032 | } | ||
860 | } | 1033 | } |
861 | 1034 | ||
862 | } else if (id & FNIC_TAG_DEV_RST) { | 1035 | } else if (id & FNIC_TAG_DEV_RST) { |
863 | /* Completion of device reset */ | 1036 | /* Completion of device reset */ |
864 | CMD_LR_STATUS(sc) = hdr_status; | 1037 | CMD_LR_STATUS(sc) = hdr_status; |
1038 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | ||
1039 | spin_unlock_irqrestore(io_lock, flags); | ||
1040 | CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING; | ||
1041 | FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, | ||
1042 | sc->device->host->host_no, id, sc, | ||
1043 | jiffies_to_msecs(jiffies - start_time), | ||
1044 | desc, 0, | ||
1045 | (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); | ||
1046 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1047 | "Terminate pending " | ||
1048 | "dev reset cmpl recd. id %d status %s\n", | ||
1049 | (int)(id & FNIC_TAG_MASK), | ||
1050 | fnic_fcpio_status_to_str(hdr_status)); | ||
1051 | return; | ||
1052 | } | ||
1053 | if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) { | ||
1054 | /* Need to wait for terminate completion */ | ||
1055 | spin_unlock_irqrestore(io_lock, flags); | ||
1056 | FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, | ||
1057 | sc->device->host->host_no, id, sc, | ||
1058 | jiffies_to_msecs(jiffies - start_time), | ||
1059 | desc, 0, | ||
1060 | (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); | ||
1061 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1062 | "dev reset cmpl recd after time out. " | ||
1063 | "id %d status %s\n", | ||
1064 | (int)(id & FNIC_TAG_MASK), | ||
1065 | fnic_fcpio_status_to_str(hdr_status)); | ||
1066 | return; | ||
1067 | } | ||
865 | CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; | 1068 | CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; |
1069 | CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; | ||
866 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 1070 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
867 | "dev reset cmpl recd. id %d status %s\n", | 1071 | "dev reset cmpl recd. id %d status %s\n", |
868 | (int)(id & FNIC_TAG_MASK), | 1072 | (int)(id & FNIC_TAG_MASK), |
@@ -889,7 +1093,6 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, | |||
889 | struct fcpio_fw_req *desc) | 1093 | struct fcpio_fw_req *desc) |
890 | { | 1094 | { |
891 | struct fnic *fnic = vnic_dev_priv(vdev); | 1095 | struct fnic *fnic = vnic_dev_priv(vdev); |
892 | int ret = 0; | ||
893 | 1096 | ||
894 | switch (desc->hdr.type) { | 1097 | switch (desc->hdr.type) { |
895 | case FCPIO_ACK: /* fw copied copy wq desc to its queue */ | 1098 | case FCPIO_ACK: /* fw copied copy wq desc to its queue */ |
@@ -906,11 +1109,11 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, | |||
906 | 1109 | ||
907 | case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ | 1110 | case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ |
908 | case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ | 1111 | case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ |
909 | ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); | 1112 | fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); |
910 | break; | 1113 | break; |
911 | 1114 | ||
912 | case FCPIO_RESET_CMPL: /* fw completed reset */ | 1115 | case FCPIO_RESET_CMPL: /* fw completed reset */ |
913 | ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc); | 1116 | fnic_fcpio_fw_reset_cmpl_handler(fnic, desc); |
914 | break; | 1117 | break; |
915 | 1118 | ||
916 | default: | 1119 | default: |
@@ -920,7 +1123,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, | |||
920 | break; | 1123 | break; |
921 | } | 1124 | } |
922 | 1125 | ||
923 | return ret; | 1126 | return 0; |
924 | } | 1127 | } |
925 | 1128 | ||
926 | /* | 1129 | /* |
@@ -950,6 +1153,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) | |||
950 | unsigned long flags = 0; | 1153 | unsigned long flags = 0; |
951 | struct scsi_cmnd *sc; | 1154 | struct scsi_cmnd *sc; |
952 | spinlock_t *io_lock; | 1155 | spinlock_t *io_lock; |
1156 | unsigned long start_time = 0; | ||
953 | 1157 | ||
954 | for (i = 0; i < FNIC_MAX_IO_REQ; i++) { | 1158 | for (i = 0; i < FNIC_MAX_IO_REQ; i++) { |
955 | if (i == exclude_id) | 1159 | if (i == exclude_id) |
@@ -962,6 +1166,23 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) | |||
962 | io_lock = fnic_io_lock_hash(fnic, sc); | 1166 | io_lock = fnic_io_lock_hash(fnic, sc); |
963 | spin_lock_irqsave(io_lock, flags); | 1167 | spin_lock_irqsave(io_lock, flags); |
964 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 1168 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
1169 | if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && | ||
1170 | !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { | ||
1171 | /* | ||
1172 | * We will be here only when FW completes reset | ||
1173 | * without sending completions for outstanding ios. | ||
1174 | */ | ||
1175 | CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; | ||
1176 | if (io_req && io_req->dr_done) | ||
1177 | complete(io_req->dr_done); | ||
1178 | else if (io_req && io_req->abts_done) | ||
1179 | complete(io_req->abts_done); | ||
1180 | spin_unlock_irqrestore(io_lock, flags); | ||
1181 | continue; | ||
1182 | } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { | ||
1183 | spin_unlock_irqrestore(io_lock, flags); | ||
1184 | continue; | ||
1185 | } | ||
965 | if (!io_req) { | 1186 | if (!io_req) { |
966 | spin_unlock_irqrestore(io_lock, flags); | 1187 | spin_unlock_irqrestore(io_lock, flags); |
967 | goto cleanup_scsi_cmd; | 1188 | goto cleanup_scsi_cmd; |
@@ -975,6 +1196,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) | |||
975 | * If there is a scsi_cmnd associated with this io_req, then | 1196 | * If there is a scsi_cmnd associated with this io_req, then |
976 | * free the corresponding state | 1197 | * free the corresponding state |
977 | */ | 1198 | */ |
1199 | start_time = io_req->start_time; | ||
978 | fnic_release_ioreq_buf(fnic, io_req, sc); | 1200 | fnic_release_ioreq_buf(fnic, io_req, sc); |
979 | mempool_free(io_req, fnic->io_req_pool); | 1201 | mempool_free(io_req, fnic->io_req_pool); |
980 | 1202 | ||
@@ -984,8 +1206,18 @@ cleanup_scsi_cmd: | |||
984 | " DID_TRANSPORT_DISRUPTED\n"); | 1206 | " DID_TRANSPORT_DISRUPTED\n"); |
985 | 1207 | ||
986 | /* Complete the command to SCSI */ | 1208 | /* Complete the command to SCSI */ |
987 | if (sc->scsi_done) | 1209 | if (sc->scsi_done) { |
1210 | FNIC_TRACE(fnic_cleanup_io, | ||
1211 | sc->device->host->host_no, i, sc, | ||
1212 | jiffies_to_msecs(jiffies - start_time), | ||
1213 | 0, ((u64)sc->cmnd[0] << 32 | | ||
1214 | (u64)sc->cmnd[2] << 24 | | ||
1215 | (u64)sc->cmnd[3] << 16 | | ||
1216 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), | ||
1217 | (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); | ||
1218 | |||
988 | sc->scsi_done(sc); | 1219 | sc->scsi_done(sc); |
1220 | } | ||
989 | } | 1221 | } |
990 | } | 1222 | } |
991 | 1223 | ||
@@ -998,6 +1230,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, | |||
998 | struct scsi_cmnd *sc; | 1230 | struct scsi_cmnd *sc; |
999 | unsigned long flags; | 1231 | unsigned long flags; |
1000 | spinlock_t *io_lock; | 1232 | spinlock_t *io_lock; |
1233 | unsigned long start_time = 0; | ||
1001 | 1234 | ||
1002 | /* get the tag reference */ | 1235 | /* get the tag reference */ |
1003 | fcpio_tag_id_dec(&desc->hdr.tag, &id); | 1236 | fcpio_tag_id_dec(&desc->hdr.tag, &id); |
@@ -1027,6 +1260,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, | |||
1027 | 1260 | ||
1028 | spin_unlock_irqrestore(io_lock, flags); | 1261 | spin_unlock_irqrestore(io_lock, flags); |
1029 | 1262 | ||
1263 | start_time = io_req->start_time; | ||
1030 | fnic_release_ioreq_buf(fnic, io_req, sc); | 1264 | fnic_release_ioreq_buf(fnic, io_req, sc); |
1031 | mempool_free(io_req, fnic->io_req_pool); | 1265 | mempool_free(io_req, fnic->io_req_pool); |
1032 | 1266 | ||
@@ -1035,8 +1269,17 @@ wq_copy_cleanup_scsi_cmd: | |||
1035 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" | 1269 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" |
1036 | " DID_NO_CONNECT\n"); | 1270 | " DID_NO_CONNECT\n"); |
1037 | 1271 | ||
1038 | if (sc->scsi_done) | 1272 | if (sc->scsi_done) { |
1273 | FNIC_TRACE(fnic_wq_copy_cleanup_handler, | ||
1274 | sc->device->host->host_no, id, sc, | ||
1275 | jiffies_to_msecs(jiffies - start_time), | ||
1276 | 0, ((u64)sc->cmnd[0] << 32 | | ||
1277 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | | ||
1278 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), | ||
1279 | (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); | ||
1280 | |||
1039 | sc->scsi_done(sc); | 1281 | sc->scsi_done(sc); |
1282 | } | ||
1040 | } | 1283 | } |
1041 | 1284 | ||
1042 | static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, | 1285 | static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, |
@@ -1044,8 +1287,18 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, | |||
1044 | struct fnic_io_req *io_req) | 1287 | struct fnic_io_req *io_req) |
1045 | { | 1288 | { |
1046 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; | 1289 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; |
1290 | struct Scsi_Host *host = fnic->lport->host; | ||
1047 | unsigned long flags; | 1291 | unsigned long flags; |
1048 | 1292 | ||
1293 | spin_lock_irqsave(host->host_lock, flags); | ||
1294 | if (unlikely(fnic_chk_state_flags_locked(fnic, | ||
1295 | FNIC_FLAGS_IO_BLOCKED))) { | ||
1296 | spin_unlock_irqrestore(host->host_lock, flags); | ||
1297 | return 1; | ||
1298 | } else | ||
1299 | atomic_inc(&fnic->in_flight); | ||
1300 | spin_unlock_irqrestore(host->host_lock, flags); | ||
1301 | |||
1049 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); | 1302 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); |
1050 | 1303 | ||
1051 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) | 1304 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) |
@@ -1053,6 +1306,9 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, | |||
1053 | 1306 | ||
1054 | if (!vnic_wq_copy_desc_avail(wq)) { | 1307 | if (!vnic_wq_copy_desc_avail(wq)) { |
1055 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); | 1308 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); |
1309 | atomic_dec(&fnic->in_flight); | ||
1310 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1311 | "fnic_queue_abort_io_req: failure: no descriptors\n"); | ||
1056 | return 1; | 1312 | return 1; |
1057 | } | 1313 | } |
1058 | fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, | 1314 | fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, |
@@ -1060,12 +1316,15 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, | |||
1060 | fnic->config.ra_tov, fnic->config.ed_tov); | 1316 | fnic->config.ra_tov, fnic->config.ed_tov); |
1061 | 1317 | ||
1062 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); | 1318 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); |
1319 | atomic_dec(&fnic->in_flight); | ||
1320 | |||
1063 | return 0; | 1321 | return 0; |
1064 | } | 1322 | } |
1065 | 1323 | ||
1066 | void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) | 1324 | static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) |
1067 | { | 1325 | { |
1068 | int tag; | 1326 | int tag; |
1327 | int abt_tag; | ||
1069 | struct fnic_io_req *io_req; | 1328 | struct fnic_io_req *io_req; |
1070 | spinlock_t *io_lock; | 1329 | spinlock_t *io_lock; |
1071 | unsigned long flags; | 1330 | unsigned long flags; |
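The guard added above also appears in fnic_queue_dr_io_req() later in this patch: a submit path first checks FNIC_FLAGS_IO_BLOCKED under host_lock and, only if it is clear, counts itself in fnic->in_flight until the descriptor has been posted. A condensed sketch of that pattern, for illustration only (error handling trimmed):

	spin_lock_irqsave(host->host_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;			/* refuse to queue while IO is blocked */
	}
	atomic_inc(&fnic->in_flight);		/* a submission is now in progress */
	spin_unlock_irqrestore(host->host_lock, flags);

	/* ... build and post the ITMF descriptor on wq_copy[0] ... */

	atomic_dec(&fnic->in_flight);		/* dropped on both success and failure */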
@@ -1075,13 +1334,14 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) | |||
1075 | 1334 | ||
1076 | FNIC_SCSI_DBG(KERN_DEBUG, | 1335 | FNIC_SCSI_DBG(KERN_DEBUG, |
1077 | fnic->lport->host, | 1336 | fnic->lport->host, |
1078 | "fnic_rport_reset_exch called portid 0x%06x\n", | 1337 | "fnic_rport_exch_reset called portid 0x%06x\n", |
1079 | port_id); | 1338 | port_id); |
1080 | 1339 | ||
1081 | if (fnic->in_remove) | 1340 | if (fnic->in_remove) |
1082 | return; | 1341 | return; |
1083 | 1342 | ||
1084 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | 1343 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { |
1344 | abt_tag = tag; | ||
1085 | sc = scsi_host_find_tag(fnic->lport->host, tag); | 1345 | sc = scsi_host_find_tag(fnic->lport->host, tag); |
1086 | if (!sc) | 1346 | if (!sc) |
1087 | continue; | 1347 | continue; |
@@ -1096,6 +1356,15 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) | |||
1096 | continue; | 1356 | continue; |
1097 | } | 1357 | } |
1098 | 1358 | ||
1359 | if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && | ||
1360 | (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { | ||
1361 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1362 | "fnic_rport_exch_reset dev rst not pending sc 0x%p\n", | ||
1363 | sc); | ||
1364 | spin_unlock_irqrestore(io_lock, flags); | ||
1365 | continue; | ||
1366 | } | ||
1367 | |||
1099 | /* | 1368 | /* |
1100 | * Found IO that is still pending with firmware and | 1369 | * Found IO that is still pending with firmware and |
1101 | * belongs to rport that went away | 1370 | * belongs to rport that went away |
@@ -1104,9 +1373,29 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) | |||
1104 | spin_unlock_irqrestore(io_lock, flags); | 1373 | spin_unlock_irqrestore(io_lock, flags); |
1105 | continue; | 1374 | continue; |
1106 | } | 1375 | } |
1376 | if (io_req->abts_done) { | ||
1377 | shost_printk(KERN_ERR, fnic->lport->host, | ||
1378 | "fnic_rport_exch_reset: io_req->abts_done is set " | ||
1379 | "state is %s\n", | ||
1380 | fnic_ioreq_state_to_str(CMD_STATE(sc))); | ||
1381 | } | ||
1382 | |||
1383 | if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) { | ||
1384 | shost_printk(KERN_ERR, fnic->lport->host, | ||
1385 | "rport_exch_reset " | ||
1386 | "IO not yet issued %p tag 0x%x flags " | ||
1387 | "%x state %d\n", | ||
1388 | sc, tag, CMD_FLAGS(sc), CMD_STATE(sc)); | ||
1389 | } | ||
1107 | old_ioreq_state = CMD_STATE(sc); | 1390 | old_ioreq_state = CMD_STATE(sc); |
1108 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; | 1391 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; |
1109 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; | 1392 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; |
1393 | if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { | ||
1394 | abt_tag = (tag | FNIC_TAG_DEV_RST); | ||
1395 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1396 | "fnic_rport_exch_reset dev rst sc 0x%p\n", | ||
1397 | sc); | ||
1398 | } | ||
1110 | 1399 | ||
1111 | BUG_ON(io_req->abts_done); | 1400 | BUG_ON(io_req->abts_done); |
1112 | 1401 | ||
@@ -1118,7 +1407,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) | |||
1118 | /* Now queue the abort command to firmware */ | 1407 | /* Now queue the abort command to firmware */ |
1119 | int_to_scsilun(sc->device->lun, &fc_lun); | 1408 | int_to_scsilun(sc->device->lun, &fc_lun); |
1120 | 1409 | ||
1121 | if (fnic_queue_abort_io_req(fnic, tag, | 1410 | if (fnic_queue_abort_io_req(fnic, abt_tag, |
1122 | FCPIO_ITMF_ABT_TASK_TERM, | 1411 | FCPIO_ITMF_ABT_TASK_TERM, |
1123 | fc_lun.scsi_lun, io_req)) { | 1412 | fc_lun.scsi_lun, io_req)) { |
1124 | /* | 1413 | /* |
@@ -1127,12 +1416,17 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) | |||
1127 | * aborted later by scsi_eh, or cleaned up during | 1416 | * aborted later by scsi_eh, or cleaned up during |
1128 | * lun reset | 1417 | * lun reset |
1129 | */ | 1418 | */ |
1130 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1131 | |||
1132 | spin_lock_irqsave(io_lock, flags); | 1419 | spin_lock_irqsave(io_lock, flags); |
1133 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) | 1420 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) |
1134 | CMD_STATE(sc) = old_ioreq_state; | 1421 | CMD_STATE(sc) = old_ioreq_state; |
1135 | spin_unlock_irqrestore(io_lock, flags); | 1422 | spin_unlock_irqrestore(io_lock, flags); |
1423 | } else { | ||
1424 | spin_lock_irqsave(io_lock, flags); | ||
1425 | if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) | ||
1426 | CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; | ||
1427 | else | ||
1428 | CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; | ||
1429 | spin_unlock_irqrestore(io_lock, flags); | ||
1136 | } | 1430 | } |
1137 | } | 1431 | } |
1138 | 1432 | ||
@@ -1141,6 +1435,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) | |||
1141 | void fnic_terminate_rport_io(struct fc_rport *rport) | 1435 | void fnic_terminate_rport_io(struct fc_rport *rport) |
1142 | { | 1436 | { |
1143 | int tag; | 1437 | int tag; |
1438 | int abt_tag; | ||
1144 | struct fnic_io_req *io_req; | 1439 | struct fnic_io_req *io_req; |
1145 | spinlock_t *io_lock; | 1440 | spinlock_t *io_lock; |
1146 | unsigned long flags; | 1441 | unsigned long flags; |
@@ -1154,14 +1449,15 @@ void fnic_terminate_rport_io(struct fc_rport *rport) | |||
1154 | 1449 | ||
1155 | FNIC_SCSI_DBG(KERN_DEBUG, | 1450 | FNIC_SCSI_DBG(KERN_DEBUG, |
1156 | fnic->lport->host, "fnic_terminate_rport_io called" | 1451 | fnic->lport->host, "fnic_terminate_rport_io called" |
1157 | " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n", | 1452 | " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", |
1158 | rport->port_name, rport->node_name, | 1453 | rport->port_name, rport->node_name, rport, |
1159 | rport->port_id); | 1454 | rport->port_id); |
1160 | 1455 | ||
1161 | if (fnic->in_remove) | 1456 | if (fnic->in_remove) |
1162 | return; | 1457 | return; |
1163 | 1458 | ||
1164 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | 1459 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { |
1460 | abt_tag = tag; | ||
1165 | sc = scsi_host_find_tag(fnic->lport->host, tag); | 1461 | sc = scsi_host_find_tag(fnic->lport->host, tag); |
1166 | if (!sc) | 1462 | if (!sc) |
1167 | continue; | 1463 | continue; |
@@ -1180,6 +1476,14 @@ void fnic_terminate_rport_io(struct fc_rport *rport) | |||
1180 | continue; | 1476 | continue; |
1181 | } | 1477 | } |
1182 | 1478 | ||
1479 | if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && | ||
1480 | (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { | ||
1481 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1482 | "fnic_terminate_rport_io dev rst not pending sc 0x%p\n", | ||
1483 | sc); | ||
1484 | spin_unlock_irqrestore(io_lock, flags); | ||
1485 | continue; | ||
1486 | } | ||
1183 | /* | 1487 | /* |
1184 | * Found IO that is still pending with firmware and | 1488 | * Found IO that is still pending with firmware and |
1185 | * belongs to rport that went away | 1489 | * belongs to rport that went away |
@@ -1188,9 +1492,27 @@ void fnic_terminate_rport_io(struct fc_rport *rport) | |||
1188 | spin_unlock_irqrestore(io_lock, flags); | 1492 | spin_unlock_irqrestore(io_lock, flags); |
1189 | continue; | 1493 | continue; |
1190 | } | 1494 | } |
1495 | if (io_req->abts_done) { | ||
1496 | shost_printk(KERN_ERR, fnic->lport->host, | ||
1497 | "fnic_terminate_rport_io: io_req->abts_done is set " | ||
1498 | "state is %s\n", | ||
1499 | fnic_ioreq_state_to_str(CMD_STATE(sc))); | ||
1500 | } | ||
1501 | if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) { | ||
1502 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, | ||
1503 | "fnic_terminate_rport_io " | ||
1504 | "IO not yet issued %p tag 0x%x flags " | ||
1505 | "%x state %d\n", | ||
1506 | sc, tag, CMD_FLAGS(sc), CMD_STATE(sc)); | ||
1507 | } | ||
1191 | old_ioreq_state = CMD_STATE(sc); | 1508 | old_ioreq_state = CMD_STATE(sc); |
1192 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; | 1509 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; |
1193 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; | 1510 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; |
1511 | if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { | ||
1512 | abt_tag = (tag | FNIC_TAG_DEV_RST); | ||
1513 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1514 | "fnic_terminate_rport_io dev rst sc 0x%p\n", sc); | ||
1515 | } | ||
1194 | 1516 | ||
1195 | BUG_ON(io_req->abts_done); | 1517 | BUG_ON(io_req->abts_done); |
1196 | 1518 | ||
@@ -1203,7 +1525,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport) | |||
1203 | /* Now queue the abort command to firmware */ | 1525 | /* Now queue the abort command to firmware */ |
1204 | int_to_scsilun(sc->device->lun, &fc_lun); | 1526 | int_to_scsilun(sc->device->lun, &fc_lun); |
1205 | 1527 | ||
1206 | if (fnic_queue_abort_io_req(fnic, tag, | 1528 | if (fnic_queue_abort_io_req(fnic, abt_tag, |
1207 | FCPIO_ITMF_ABT_TASK_TERM, | 1529 | FCPIO_ITMF_ABT_TASK_TERM, |
1208 | fc_lun.scsi_lun, io_req)) { | 1530 | fc_lun.scsi_lun, io_req)) { |
1209 | /* | 1531 | /* |
@@ -1212,12 +1534,17 @@ void fnic_terminate_rport_io(struct fc_rport *rport) | |||
1212 | * aborted later by scsi_eh, or cleaned up during | 1534 | * aborted later by scsi_eh, or cleaned up during |
1213 | * lun reset | 1535 | * lun reset |
1214 | */ | 1536 | */ |
1215 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1216 | |||
1217 | spin_lock_irqsave(io_lock, flags); | 1537 | spin_lock_irqsave(io_lock, flags); |
1218 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) | 1538 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) |
1219 | CMD_STATE(sc) = old_ioreq_state; | 1539 | CMD_STATE(sc) = old_ioreq_state; |
1220 | spin_unlock_irqrestore(io_lock, flags); | 1540 | spin_unlock_irqrestore(io_lock, flags); |
1541 | } else { | ||
1542 | spin_lock_irqsave(io_lock, flags); | ||
1543 | if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) | ||
1544 | CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; | ||
1545 | else | ||
1546 | CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; | ||
1547 | spin_unlock_irqrestore(io_lock, flags); | ||
1221 | } | 1548 | } |
1222 | } | 1549 | } |
1223 | 1550 | ||
@@ -1232,13 +1559,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
1232 | { | 1559 | { |
1233 | struct fc_lport *lp; | 1560 | struct fc_lport *lp; |
1234 | struct fnic *fnic; | 1561 | struct fnic *fnic; |
1235 | struct fnic_io_req *io_req; | 1562 | struct fnic_io_req *io_req = NULL; |
1236 | struct fc_rport *rport; | 1563 | struct fc_rport *rport; |
1237 | spinlock_t *io_lock; | 1564 | spinlock_t *io_lock; |
1238 | unsigned long flags; | 1565 | unsigned long flags; |
1566 | unsigned long start_time = 0; | ||
1239 | int ret = SUCCESS; | 1567 | int ret = SUCCESS; |
1240 | u32 task_req; | 1568 | u32 task_req = 0; |
1241 | struct scsi_lun fc_lun; | 1569 | struct scsi_lun fc_lun; |
1570 | int tag; | ||
1242 | DECLARE_COMPLETION_ONSTACK(tm_done); | 1571 | DECLARE_COMPLETION_ONSTACK(tm_done); |
1243 | 1572 | ||
1244 | /* Wait for rport to unblock */ | 1573 | /* Wait for rport to unblock */ |
@@ -1249,9 +1578,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
1249 | 1578 | ||
1250 | fnic = lport_priv(lp); | 1579 | fnic = lport_priv(lp); |
1251 | rport = starget_to_rport(scsi_target(sc->device)); | 1580 | rport = starget_to_rport(scsi_target(sc->device)); |
1252 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 1581 | tag = sc->request->tag; |
1253 | "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n", | 1582 | FNIC_SCSI_DBG(KERN_DEBUG, |
1254 | rport->port_id, sc->device->lun, sc->request->tag); | 1583 | fnic->lport->host, |
1584 | "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n", | ||
1585 | rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc)); | ||
1586 | |||
1587 | CMD_FLAGS(sc) = FNIC_NO_FLAGS; | ||
1255 | 1588 | ||
1256 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) { | 1589 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) { |
1257 | ret = FAILED; | 1590 | ret = FAILED; |
@@ -1318,6 +1651,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
1318 | ret = FAILED; | 1651 | ret = FAILED; |
1319 | goto fnic_abort_cmd_end; | 1652 | goto fnic_abort_cmd_end; |
1320 | } | 1653 | } |
1654 | if (task_req == FCPIO_ITMF_ABT_TASK) | ||
1655 | CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED; | ||
1656 | else | ||
1657 | CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED; | ||
1321 | 1658 | ||
1322 | /* | 1659 | /* |
1323 | * We queued an abort IO, wait for its completion. | 1660 | * We queued an abort IO, wait for its completion. |
@@ -1336,6 +1673,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
1336 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 1673 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
1337 | if (!io_req) { | 1674 | if (!io_req) { |
1338 | spin_unlock_irqrestore(io_lock, flags); | 1675 | spin_unlock_irqrestore(io_lock, flags); |
1676 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; | ||
1339 | ret = FAILED; | 1677 | ret = FAILED; |
1340 | goto fnic_abort_cmd_end; | 1678 | goto fnic_abort_cmd_end; |
1341 | } | 1679 | } |
@@ -1344,6 +1682,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
1344 | /* fw did not complete abort, timed out */ | 1682 | /* fw did not complete abort, timed out */ |
1345 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | 1683 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { |
1346 | spin_unlock_irqrestore(io_lock, flags); | 1684 | spin_unlock_irqrestore(io_lock, flags); |
1685 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; | ||
1347 | ret = FAILED; | 1686 | ret = FAILED; |
1348 | goto fnic_abort_cmd_end; | 1687 | goto fnic_abort_cmd_end; |
1349 | } | 1688 | } |
@@ -1359,12 +1698,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
1359 | 1698 | ||
1360 | spin_unlock_irqrestore(io_lock, flags); | 1699 | spin_unlock_irqrestore(io_lock, flags); |
1361 | 1700 | ||
1701 | start_time = io_req->start_time; | ||
1362 | fnic_release_ioreq_buf(fnic, io_req, sc); | 1702 | fnic_release_ioreq_buf(fnic, io_req, sc); |
1363 | mempool_free(io_req, fnic->io_req_pool); | 1703 | mempool_free(io_req, fnic->io_req_pool); |
1364 | 1704 | ||
1365 | fnic_abort_cmd_end: | 1705 | fnic_abort_cmd_end: |
1706 | FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, | ||
1707 | sc->request->tag, sc, | ||
1708 | jiffies_to_msecs(jiffies - start_time), | ||
1709 | 0, ((u64)sc->cmnd[0] << 32 | | ||
1710 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | | ||
1711 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), | ||
1712 | (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); | ||
1713 | |||
1366 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 1714 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1367 | "Returning from abort cmd %s\n", | 1715 | "Returning from abort cmd type %x %s\n", task_req, |
1368 | (ret == SUCCESS) ? | 1716 | (ret == SUCCESS) ? |
1369 | "SUCCESS" : "FAILED"); | 1717 | "SUCCESS" : "FAILED"); |
1370 | return ret; | 1718 | return ret; |
@@ -1375,16 +1723,28 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, | |||
1375 | struct fnic_io_req *io_req) | 1723 | struct fnic_io_req *io_req) |
1376 | { | 1724 | { |
1377 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; | 1725 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; |
1726 | struct Scsi_Host *host = fnic->lport->host; | ||
1378 | struct scsi_lun fc_lun; | 1727 | struct scsi_lun fc_lun; |
1379 | int ret = 0; | 1728 | int ret = 0; |
1380 | unsigned long intr_flags; | 1729 | unsigned long intr_flags; |
1381 | 1730 | ||
1731 | spin_lock_irqsave(host->host_lock, intr_flags); | ||
1732 | if (unlikely(fnic_chk_state_flags_locked(fnic, | ||
1733 | FNIC_FLAGS_IO_BLOCKED))) { | ||
1734 | spin_unlock_irqrestore(host->host_lock, intr_flags); | ||
1735 | return FAILED; | ||
1736 | } else | ||
1737 | atomic_inc(&fnic->in_flight); | ||
1738 | spin_unlock_irqrestore(host->host_lock, intr_flags); | ||
1739 | |||
1382 | spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); | 1740 | spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); |
1383 | 1741 | ||
1384 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) | 1742 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) |
1385 | free_wq_copy_descs(fnic, wq); | 1743 | free_wq_copy_descs(fnic, wq); |
1386 | 1744 | ||
1387 | if (!vnic_wq_copy_desc_avail(wq)) { | 1745 | if (!vnic_wq_copy_desc_avail(wq)) { |
1746 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1747 | "queue_dr_io_req failure - no descriptors\n"); | ||
1388 | ret = -EAGAIN; | 1748 | ret = -EAGAIN; |
1389 | goto lr_io_req_end; | 1749 | goto lr_io_req_end; |
1390 | } | 1750 | } |
@@ -1399,6 +1759,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, | |||
1399 | 1759 | ||
1400 | lr_io_req_end: | 1760 | lr_io_req_end: |
1401 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); | 1761 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); |
1762 | atomic_dec(&fnic->in_flight); | ||
1402 | 1763 | ||
1403 | return ret; | 1764 | return ret; |
1404 | } | 1765 | } |
@@ -1412,7 +1773,7 @@ lr_io_req_end: | |||
1412 | static int fnic_clean_pending_aborts(struct fnic *fnic, | 1773 | static int fnic_clean_pending_aborts(struct fnic *fnic, |
1413 | struct scsi_cmnd *lr_sc) | 1774 | struct scsi_cmnd *lr_sc) |
1414 | { | 1775 | { |
1415 | int tag; | 1776 | int tag, abt_tag; |
1416 | struct fnic_io_req *io_req; | 1777 | struct fnic_io_req *io_req; |
1417 | spinlock_t *io_lock; | 1778 | spinlock_t *io_lock; |
1418 | unsigned long flags; | 1779 | unsigned long flags; |
@@ -1421,6 +1782,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
1421 | struct scsi_lun fc_lun; | 1782 | struct scsi_lun fc_lun; |
1422 | struct scsi_device *lun_dev = lr_sc->device; | 1783 | struct scsi_device *lun_dev = lr_sc->device; |
1423 | DECLARE_COMPLETION_ONSTACK(tm_done); | 1784 | DECLARE_COMPLETION_ONSTACK(tm_done); |
1785 | enum fnic_ioreq_state old_ioreq_state; | ||
1424 | 1786 | ||
1425 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | 1787 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { |
1426 | sc = scsi_host_find_tag(fnic->lport->host, tag); | 1788 | sc = scsi_host_find_tag(fnic->lport->host, tag); |
@@ -1449,7 +1811,41 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
1449 | "Found IO in %s on lun\n", | 1811 | "Found IO in %s on lun\n", |
1450 | fnic_ioreq_state_to_str(CMD_STATE(sc))); | 1812 | fnic_ioreq_state_to_str(CMD_STATE(sc))); |
1451 | 1813 | ||
1452 | BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING); | 1814 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { |
1815 | spin_unlock_irqrestore(io_lock, flags); | ||
1816 | continue; | ||
1817 | } | ||
1818 | if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && | ||
1819 | (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { | ||
1820 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, | ||
1821 | "%s dev rst not pending sc 0x%p\n", __func__, | ||
1822 | sc); | ||
1823 | spin_unlock_irqrestore(io_lock, flags); | ||
1824 | continue; | ||
1825 | } | ||
1826 | old_ioreq_state = CMD_STATE(sc); | ||
1827 | /* | ||
1828 | * Any pending IO issued prior to reset is expected to be | ||
1829 | * in abts pending state; if not, we set | ||
1830 | * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending. | ||
1831 | * When the IO completes, it will be handed over and | ||
1832 | * handled in this function. | ||
1833 | */ | ||
1834 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; | ||
1835 | |||
1836 | if (io_req->abts_done) | ||
1837 | shost_printk(KERN_ERR, fnic->lport->host, | ||
1838 | "%s: io_req->abts_done is set state is %s\n", | ||
1839 | __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); | ||
1840 | |||
1841 | BUG_ON(io_req->abts_done); | ||
1842 | |||
1843 | abt_tag = tag; | ||
1844 | if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { | ||
1845 | abt_tag |= FNIC_TAG_DEV_RST; | ||
1846 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, | ||
1847 | "%s: dev rst sc 0x%p\n", __func__, sc); | ||
1848 | } | ||
1453 | 1849 | ||
1454 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; | 1850 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; |
1455 | io_req->abts_done = &tm_done; | 1851 | io_req->abts_done = &tm_done; |
@@ -1458,17 +1854,25 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
1458 | /* Now queue the abort command to firmware */ | 1854 | /* Now queue the abort command to firmware */ |
1459 | int_to_scsilun(sc->device->lun, &fc_lun); | 1855 | int_to_scsilun(sc->device->lun, &fc_lun); |
1460 | 1856 | ||
1461 | if (fnic_queue_abort_io_req(fnic, tag, | 1857 | if (fnic_queue_abort_io_req(fnic, abt_tag, |
1462 | FCPIO_ITMF_ABT_TASK_TERM, | 1858 | FCPIO_ITMF_ABT_TASK_TERM, |
1463 | fc_lun.scsi_lun, io_req)) { | 1859 | fc_lun.scsi_lun, io_req)) { |
1464 | spin_lock_irqsave(io_lock, flags); | 1860 | spin_lock_irqsave(io_lock, flags); |
1465 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 1861 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
1466 | if (io_req) | 1862 | if (io_req) |
1467 | io_req->abts_done = NULL; | 1863 | io_req->abts_done = NULL; |
1864 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) | ||
1865 | CMD_STATE(sc) = old_ioreq_state; | ||
1468 | spin_unlock_irqrestore(io_lock, flags); | 1866 | spin_unlock_irqrestore(io_lock, flags); |
1469 | ret = 1; | 1867 | ret = 1; |
1470 | goto clean_pending_aborts_end; | 1868 | goto clean_pending_aborts_end; |
1869 | } else { | ||
1870 | spin_lock_irqsave(io_lock, flags); | ||
1871 | if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) | ||
1872 | CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; | ||
1873 | spin_unlock_irqrestore(io_lock, flags); | ||
1471 | } | 1874 | } |
1875 | CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; | ||
1472 | 1876 | ||
1473 | wait_for_completion_timeout(&tm_done, | 1877 | wait_for_completion_timeout(&tm_done, |
1474 | msecs_to_jiffies | 1878 | msecs_to_jiffies |
@@ -1479,8 +1883,8 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
1479 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 1883 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
1480 | if (!io_req) { | 1884 | if (!io_req) { |
1481 | spin_unlock_irqrestore(io_lock, flags); | 1885 | spin_unlock_irqrestore(io_lock, flags); |
1482 | ret = 1; | 1886 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; |
1483 | goto clean_pending_aborts_end; | 1887 | continue; |
1484 | } | 1888 | } |
1485 | 1889 | ||
1486 | io_req->abts_done = NULL; | 1890 | io_req->abts_done = NULL; |
@@ -1488,6 +1892,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
1488 | /* if abort is still pending with fw, fail */ | 1892 | /* if abort is still pending with fw, fail */ |
1489 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | 1893 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { |
1490 | spin_unlock_irqrestore(io_lock, flags); | 1894 | spin_unlock_irqrestore(io_lock, flags); |
1895 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; | ||
1491 | ret = 1; | 1896 | ret = 1; |
1492 | goto clean_pending_aborts_end; | 1897 | goto clean_pending_aborts_end; |
1493 | } | 1898 | } |
@@ -1498,10 +1903,75 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
1498 | mempool_free(io_req, fnic->io_req_pool); | 1903 | mempool_free(io_req, fnic->io_req_pool); |
1499 | } | 1904 | } |
1500 | 1905 | ||
1906 | schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); | ||
1907 | |||
1908 | /* walk again to check if IOs are still pending in fw */ | ||
1909 | if (fnic_is_abts_pending(fnic, lr_sc)) | ||
1910 | ret = FAILED; | ||
1911 | |||
1501 | clean_pending_aborts_end: | 1912 | clean_pending_aborts_end: |
1502 | return ret; | 1913 | return ret; |
1503 | } | 1914 | } |
1504 | 1915 | ||
1916 | /** | ||
1917 | * fnic_scsi_host_start_tag | ||
1918 | * Allocates tagid from host's tag list | ||
1919 | **/ | ||
1920 | static inline int | ||
1921 | fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc) | ||
1922 | { | ||
1923 | struct blk_queue_tag *bqt = fnic->lport->host->bqt; | ||
1924 | int tag, ret = SCSI_NO_TAG; | ||
1925 | |||
1926 | BUG_ON(!bqt); | ||
1927 | if (!bqt) { | ||
1928 | pr_err("Tags are not supported\n"); | ||
1929 | goto end; | ||
1930 | } | ||
1931 | |||
1932 | do { | ||
1933 | tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1); | ||
1934 | if (tag >= bqt->max_depth) { | ||
1935 | pr_err("Tag allocation failure\n"); | ||
1936 | goto end; | ||
1937 | } | ||
1938 | } while (test_and_set_bit(tag, bqt->tag_map)); | ||
1939 | |||
1940 | bqt->tag_index[tag] = sc->request; | ||
1941 | sc->request->tag = tag; | ||
1942 | sc->tag = tag; | ||
1943 | if (!sc->request->special) | ||
1944 | sc->request->special = sc; | ||
1945 | |||
1946 | ret = tag; | ||
1947 | |||
1948 | end: | ||
1949 | return ret; | ||
1950 | } | ||
1951 | |||
1952 | /** | ||
1953 | * fnic_scsi_host_end_tag | ||
1954 | * frees tag allocated by fnic_scsi_host_start_tag. | ||
1955 | **/ | ||
1956 | static inline void | ||
1957 | fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc) | ||
1958 | { | ||
1959 | struct blk_queue_tag *bqt = fnic->lport->host->bqt; | ||
1960 | int tag = sc->request->tag; | ||
1961 | |||
1962 | if (tag == SCSI_NO_TAG) | ||
1963 | return; | ||
1964 | |||
1965 | BUG_ON(!bqt || !bqt->tag_index[tag]); | ||
1966 | if (!bqt) | ||
1967 | return; | ||
1968 | |||
1969 | bqt->tag_index[tag] = NULL; | ||
1970 | clear_bit(tag, bqt->tag_map); | ||
1971 | |||
1972 | return; | ||
1973 | } | ||
1974 | |||
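A device reset can be requested on a SCSI command that never received a block-layer tag, so fnic_device_reset() below borrows one with these helpers and releases it again on exit. A minimal sketch of that pairing (the local name tag_gen is illustrative):

	int tag = sc->request->tag;
	int tag_gen = 0;			/* remember whether we allocated the tag */

	if (unlikely(tag < 0)) {
		tag = fnic_scsi_host_start_tag(fnic, sc);
		if (unlikely(tag == SCSI_NO_TAG))
			return FAILED;		/* could not borrow a tag */
		tag_gen = 1;
	}

	/* ... issue the device reset using 'tag' ... */

	if (unlikely(tag_gen))			/* free only a tag we allocated ourselves */
		fnic_scsi_host_end_tag(fnic, sc);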
1505 | /* | 1975 | /* |
1506 | * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN | 1976 | * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN |
1507 | * fail to get aborted. It calls driver's eh_device_reset with a SCSI command | 1977 | * fail to get aborted. It calls driver's eh_device_reset with a SCSI command |
@@ -1511,13 +1981,17 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
1511 | { | 1981 | { |
1512 | struct fc_lport *lp; | 1982 | struct fc_lport *lp; |
1513 | struct fnic *fnic; | 1983 | struct fnic *fnic; |
1514 | struct fnic_io_req *io_req; | 1984 | struct fnic_io_req *io_req = NULL; |
1515 | struct fc_rport *rport; | 1985 | struct fc_rport *rport; |
1516 | int status; | 1986 | int status; |
1517 | int ret = FAILED; | 1987 | int ret = FAILED; |
1518 | spinlock_t *io_lock; | 1988 | spinlock_t *io_lock; |
1519 | unsigned long flags; | 1989 | unsigned long flags; |
1990 | unsigned long start_time = 0; | ||
1991 | struct scsi_lun fc_lun; | ||
1992 | int tag = 0; | ||
1520 | DECLARE_COMPLETION_ONSTACK(tm_done); | 1993 | DECLARE_COMPLETION_ONSTACK(tm_done); |
1994 | int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ | ||
1521 | 1995 | ||
1522 | /* Wait for rport to unblock */ | 1996 | /* Wait for rport to unblock */ |
1523 | fc_block_scsi_eh(sc); | 1997 | fc_block_scsi_eh(sc); |
@@ -1529,8 +2003,8 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
1529 | 2003 | ||
1530 | rport = starget_to_rport(scsi_target(sc->device)); | 2004 | rport = starget_to_rport(scsi_target(sc->device)); |
1531 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 2005 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1532 | "Device reset called FCID 0x%x, LUN 0x%x\n", | 2006 | "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n", |
1533 | rport->port_id, sc->device->lun); | 2007 | rport->port_id, sc->device->lun, sc); |
1534 | 2008 | ||
1535 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) | 2009 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) |
1536 | goto fnic_device_reset_end; | 2010 | goto fnic_device_reset_end; |
@@ -1539,6 +2013,16 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
1539 | if (fc_remote_port_chkready(rport)) | 2013 | if (fc_remote_port_chkready(rport)) |
1540 | goto fnic_device_reset_end; | 2014 | goto fnic_device_reset_end; |
1541 | 2015 | ||
2016 | CMD_FLAGS(sc) = FNIC_DEVICE_RESET; | ||
2017 | /* Allocate tag if not present */ | ||
2018 | |||
2019 | tag = sc->request->tag; | ||
2020 | if (unlikely(tag < 0)) { | ||
2021 | tag = fnic_scsi_host_start_tag(fnic, sc); | ||
2022 | if (unlikely(tag == SCSI_NO_TAG)) | ||
2023 | goto fnic_device_reset_end; | ||
2024 | tag_gen_flag = 1; | ||
2025 | } | ||
1542 | io_lock = fnic_io_lock_hash(fnic, sc); | 2026 | io_lock = fnic_io_lock_hash(fnic, sc); |
1543 | spin_lock_irqsave(io_lock, flags); | 2027 | spin_lock_irqsave(io_lock, flags); |
1544 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 2028 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
@@ -1562,8 +2046,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
1562 | CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; | 2046 | CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; |
1563 | spin_unlock_irqrestore(io_lock, flags); | 2047 | spin_unlock_irqrestore(io_lock, flags); |
1564 | 2048 | ||
1565 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n", | 2049 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag); |
1566 | sc->request->tag); | ||
1567 | 2050 | ||
1568 | /* | 2051 | /* |
1569 | * issue the device reset, if enqueue failed, clean up the ioreq | 2052 | * issue the device reset, if enqueue failed, clean up the ioreq |
@@ -1576,6 +2059,9 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
1576 | io_req->dr_done = NULL; | 2059 | io_req->dr_done = NULL; |
1577 | goto fnic_device_reset_clean; | 2060 | goto fnic_device_reset_clean; |
1578 | } | 2061 | } |
2062 | spin_lock_irqsave(io_lock, flags); | ||
2063 | CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED; | ||
2064 | spin_unlock_irqrestore(io_lock, flags); | ||
1579 | 2065 | ||
1580 | /* | 2066 | /* |
1581 | * Wait on the local completion for LUN reset. The io_req may be | 2067 | * Wait on the local completion for LUN reset. The io_req may be |
@@ -1588,12 +2074,13 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
1588 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 2074 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
1589 | if (!io_req) { | 2075 | if (!io_req) { |
1590 | spin_unlock_irqrestore(io_lock, flags); | 2076 | spin_unlock_irqrestore(io_lock, flags); |
2077 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
2078 | "io_req is null tag 0x%x sc 0x%p\n", tag, sc); | ||
1591 | goto fnic_device_reset_end; | 2079 | goto fnic_device_reset_end; |
1592 | } | 2080 | } |
1593 | io_req->dr_done = NULL; | 2081 | io_req->dr_done = NULL; |
1594 | 2082 | ||
1595 | status = CMD_LR_STATUS(sc); | 2083 | status = CMD_LR_STATUS(sc); |
1596 | spin_unlock_irqrestore(io_lock, flags); | ||
1597 | 2084 | ||
1598 | /* | 2085 | /* |
1599 | * If lun reset not completed, bail out with failed. io_req | 2086 | * If lun reset not completed, bail out with failed. io_req |
@@ -1602,7 +2089,53 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
1602 | if (status == FCPIO_INVALID_CODE) { | 2089 | if (status == FCPIO_INVALID_CODE) { |
1603 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 2090 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1604 | "Device reset timed out\n"); | 2091 | "Device reset timed out\n"); |
1605 | goto fnic_device_reset_end; | 2092 | CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; |
2093 | spin_unlock_irqrestore(io_lock, flags); | ||
2094 | int_to_scsilun(sc->device->lun, &fc_lun); | ||
2095 | /* | ||
2096 | * Issue abort and terminate on the device reset request. | ||
2097 | * If q'ing of the abort fails, retry issue it after a delay. | ||
2098 | */ | ||
2099 | while (1) { | ||
2100 | spin_lock_irqsave(io_lock, flags); | ||
2101 | if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) { | ||
2102 | spin_unlock_irqrestore(io_lock, flags); | ||
2103 | break; | ||
2104 | } | ||
2105 | spin_unlock_irqrestore(io_lock, flags); | ||
2106 | if (fnic_queue_abort_io_req(fnic, | ||
2107 | tag | FNIC_TAG_DEV_RST, | ||
2108 | FCPIO_ITMF_ABT_TASK_TERM, | ||
2109 | fc_lun.scsi_lun, io_req)) { | ||
2110 | wait_for_completion_timeout(&tm_done, | ||
2111 | msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT)); | ||
2112 | } else { | ||
2113 | spin_lock_irqsave(io_lock, flags); | ||
2114 | CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; | ||
2115 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; | ||
2116 | io_req->abts_done = &tm_done; | ||
2117 | spin_unlock_irqrestore(io_lock, flags); | ||
2118 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
2119 | "Abort and terminate issued on Device reset " | ||
2120 | "tag 0x%x sc 0x%p\n", tag, sc); | ||
2121 | break; | ||
2122 | } | ||
2123 | } | ||
2124 | while (1) { | ||
2125 | spin_lock_irqsave(io_lock, flags); | ||
2126 | if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { | ||
2127 | spin_unlock_irqrestore(io_lock, flags); | ||
2128 | wait_for_completion_timeout(&tm_done, | ||
2129 | msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); | ||
2130 | break; | ||
2131 | } else { | ||
2132 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
2133 | io_req->abts_done = NULL; | ||
2134 | goto fnic_device_reset_clean; | ||
2135 | } | ||
2136 | } | ||
2137 | } else { | ||
2138 | spin_unlock_irqrestore(io_lock, flags); | ||
1606 | } | 2139 | } |
1607 | 2140 | ||
1608 | /* Completed, but not successful, clean up the io_req, return fail */ | 2141 | /* Completed, but not successful, clean up the io_req, return fail */ |
@@ -1645,11 +2178,24 @@ fnic_device_reset_clean: | |||
1645 | spin_unlock_irqrestore(io_lock, flags); | 2178 | spin_unlock_irqrestore(io_lock, flags); |
1646 | 2179 | ||
1647 | if (io_req) { | 2180 | if (io_req) { |
2181 | start_time = io_req->start_time; | ||
1648 | fnic_release_ioreq_buf(fnic, io_req, sc); | 2182 | fnic_release_ioreq_buf(fnic, io_req, sc); |
1649 | mempool_free(io_req, fnic->io_req_pool); | 2183 | mempool_free(io_req, fnic->io_req_pool); |
1650 | } | 2184 | } |
1651 | 2185 | ||
1652 | fnic_device_reset_end: | 2186 | fnic_device_reset_end: |
2187 | FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, | ||
2188 | sc->request->tag, sc, | ||
2189 | jiffies_to_msecs(jiffies - start_time), | ||
2190 | 0, ((u64)sc->cmnd[0] << 32 | | ||
2191 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | | ||
2192 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), | ||
2193 | (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); | ||
2194 | |||
2195 | /* free tag if it is allocated */ | ||
2196 | if (unlikely(tag_gen_flag)) | ||
2197 | fnic_scsi_host_end_tag(fnic, sc); | ||
2198 | |||
1653 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 2199 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1654 | "Returning from device reset %s\n", | 2200 | "Returning from device reset %s\n", |
1655 | (ret == SUCCESS) ? | 2201 | (ret == SUCCESS) ? |
@@ -1735,7 +2281,15 @@ void fnic_scsi_abort_io(struct fc_lport *lp) | |||
1735 | DECLARE_COMPLETION_ONSTACK(remove_wait); | 2281 | DECLARE_COMPLETION_ONSTACK(remove_wait); |
1736 | 2282 | ||
1737 | /* Issue firmware reset for fnic, wait for reset to complete */ | 2283 | /* Issue firmware reset for fnic, wait for reset to complete */ |
2284 | retry_fw_reset: | ||
1738 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 2285 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
2286 | if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { | ||
2287 | /* fw reset is in progress, poll for its completion */ | ||
2288 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
2289 | schedule_timeout(msecs_to_jiffies(100)); | ||
2290 | goto retry_fw_reset; | ||
2291 | } | ||
2292 | |||
1739 | fnic->remove_wait = &remove_wait; | 2293 | fnic->remove_wait = &remove_wait; |
1740 | old_state = fnic->state; | 2294 | old_state = fnic->state; |
1741 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; | 2295 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; |
@@ -1776,7 +2330,14 @@ void fnic_scsi_cleanup(struct fc_lport *lp) | |||
1776 | struct fnic *fnic = lport_priv(lp); | 2330 | struct fnic *fnic = lport_priv(lp); |
1777 | 2331 | ||
1778 | /* issue fw reset */ | 2332 | /* issue fw reset */ |
2333 | retry_fw_reset: | ||
1779 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 2334 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
2335 | if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { | ||
2336 | /* fw reset is in progress, poll for its completion */ | ||
2337 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
2338 | schedule_timeout(msecs_to_jiffies(100)); | ||
2339 | goto retry_fw_reset; | ||
2340 | } | ||
1780 | old_state = fnic->state; | 2341 | old_state = fnic->state; |
1781 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; | 2342 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; |
1782 | fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); | 2343 | fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); |
@@ -1822,3 +2383,61 @@ call_fc_exch_mgr_reset: | |||
1822 | fc_exch_mgr_reset(lp, sid, did); | 2383 | fc_exch_mgr_reset(lp, sid, did); |
1823 | 2384 | ||
1824 | } | 2385 | } |
2386 | |||
2387 | /* | ||
2388 | * fnic_is_abts_pending() is a helper function that | ||
2389 | * walks the tag map to check whether any IOs are pending; if there is one, | ||
2390 | * it returns 1 (true), otherwise 0 (false). | ||
2391 | * If @lr_sc is non-NULL, it checks only IOs specific to that LUN, | ||
2392 | * otherwise, it checks for all IOs. | ||
2393 | */ | ||
2394 | int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) | ||
2395 | { | ||
2396 | int tag; | ||
2397 | struct fnic_io_req *io_req; | ||
2398 | spinlock_t *io_lock; | ||
2399 | unsigned long flags; | ||
2400 | int ret = 0; | ||
2401 | struct scsi_cmnd *sc; | ||
2402 | struct scsi_device *lun_dev = NULL; | ||
2403 | |||
2404 | if (lr_sc) | ||
2405 | lun_dev = lr_sc->device; | ||
2406 | |||
2407 | /* walk again to check if IOs are still pending in fw */ | ||
2408 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | ||
2409 | sc = scsi_host_find_tag(fnic->lport->host, tag); | ||
2410 | /* | ||
2411 | * ignore this lun reset cmd or cmds that do not belong to | ||
2412 | * this lun | ||
2413 | */ | ||
2414 | if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc))) | ||
2415 | continue; | ||
2416 | |||
2417 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
2418 | spin_lock_irqsave(io_lock, flags); | ||
2419 | |||
2420 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
2421 | |||
2422 | if (!io_req || sc->device != lun_dev) { | ||
2423 | spin_unlock_irqrestore(io_lock, flags); | ||
2424 | continue; | ||
2425 | } | ||
2426 | |||
2427 | /* | ||
2428 | * Found IO that is still pending with firmware and | ||
2429 | * belongs to the LUN that we are resetting | ||
2430 | */ | ||
2431 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, | ||
2432 | "Found IO in %s on lun\n", | ||
2433 | fnic_ioreq_state_to_str(CMD_STATE(sc))); | ||
2434 | |||
2435 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | ||
2436 | spin_unlock_irqrestore(io_lock, flags); | ||
2437 | ret = 1; | ||
2438 | continue; | ||
2439 | } | ||
2440 | } | ||
2441 | |||
2442 | return ret; | ||
2443 | } | ||
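The helper above scans the host's whole tag space and reports whether any command on the LUN being reset is still waiting for an abort to complete. A hedged user-space sketch of the same idea follows; every type and name in it is invented for illustration, and unlike the driver it returns as soon as one pending abort is found rather than finishing the scan:

#include <stdbool.h>

#define MAX_TAGS 64

enum io_state { IO_ACTIVE, IO_ABTS_PENDING };

struct io_slot {
	bool in_use;
	int lun;		/* which LUN the command targets */
	enum io_state state;
};

/*
 * Return true if any in-use slot on @lun, other than @skip_tag (e.g. the
 * LUN-reset command itself), is still waiting for its abort to complete.
 */
static bool abts_pending(const struct io_slot *table, int lun, int skip_tag)
{
	for (int tag = 0; tag < MAX_TAGS; tag++) {
		const struct io_slot *s = &table[tag];

		if (!s->in_use || tag == skip_tag || s->lun != lun)
			continue;	/* not ours, or the reset command itself */
		if (s->state == IO_ABTS_PENDING)
			return true;
	}
	return false;
}

int main(void)
{
	struct io_slot table[MAX_TAGS] = { 0 };

	table[3] = (struct io_slot){ .in_use = true, .lun = 1,
				     .state = IO_ABTS_PENDING };
	return abts_pending(table, 1, 0) ? 0 : 1;
}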
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c new file mode 100644 index 000000000000..23a60e3d8527 --- /dev/null +++ b/drivers/scsi/fnic/fnic_trace.c | |||
@@ -0,0 +1,273 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Cisco Systems, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you may redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; version 2 of the License. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
10 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
11 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
12 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
14 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
15 | * SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/mempool.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | #include <linux/kallsyms.h> | ||
23 | #include "fnic_io.h" | ||
24 | #include "fnic.h" | ||
25 | |||
26 | unsigned int trace_max_pages; | ||
27 | static int fnic_max_trace_entries; | ||
28 | |||
29 | static unsigned long fnic_trace_buf_p; | ||
30 | static DEFINE_SPINLOCK(fnic_trace_lock); | ||
31 | |||
32 | static fnic_trace_dbg_t fnic_trace_entries; | ||
33 | int fnic_tracing_enabled = 1; | ||
34 | |||
35 | /* | ||
36 | * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information | ||
37 | * | ||
38 | * Description: | ||
39 | * This routine gets the next available trace buffer entry location @wr_idx | ||
40 | * from the allocated trace buffer pages and gives that memory location | ||
41 | * to the caller to store the trace information. | ||
42 | * | ||
43 | * Return Value: | ||
44 | * This routine returns a pointer to the next available trace entry | ||
45 | * @fnic_buf_head for the caller to fill with trace information. | ||
46 | */ | ||
47 | fnic_trace_data_t *fnic_trace_get_buf(void) | ||
48 | { | ||
49 | unsigned long fnic_buf_head; | ||
50 | unsigned long flags; | ||
51 | |||
52 | spin_lock_irqsave(&fnic_trace_lock, flags); | ||
53 | |||
54 | /* | ||
55 | * Get next available memory location for writing trace information | ||
56 | * at @wr_idx and increment @wr_idx | ||
57 | */ | ||
58 | fnic_buf_head = | ||
59 | fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx]; | ||
60 | fnic_trace_entries.wr_idx++; | ||
61 | |||
62 | /* | ||
63 | * If the trace buffer is full, wrap wr_idx back | ||
64 | * to zero | ||
65 | */ | ||
66 | if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries) | ||
67 | fnic_trace_entries.wr_idx = 0; | ||
68 | |||
69 | /* | ||
70 | * If the write index @wr_idx has caught up with the read index @rd_idx, | ||
71 | * increment @rd_idx to move to the next entry in the trace buffer | ||
72 | */ | ||
73 | if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) { | ||
74 | fnic_trace_entries.rd_idx++; | ||
75 | if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries) | ||
76 | fnic_trace_entries.rd_idx = 0; | ||
77 | } | ||
78 | spin_unlock_irqrestore(&fnic_trace_lock, flags); | ||
79 | return (fnic_trace_data_t *)fnic_buf_head; | ||
80 | } | ||
81 | |||
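fnic_trace_get_buf() hands out entries from a preallocated ring: wr_idx advances on every call, wraps at the end, and pushes rd_idx forward when it catches up, so the oldest record is silently overwritten. A small stand-alone sketch of that index arithmetic on a plain array (sizes and names invented; the real routine does this under fnic_trace_lock):

#include <stdio.h>

#define RING_ENTRIES 8

struct ring {
	int wr_idx;
	int rd_idx;
	unsigned long slot[RING_ENTRIES];	/* stands in for page_offset[] */
};

/*
 * Return the slot the caller may fill, advancing wr_idx and, if the write
 * index just caught up with rd_idx, advancing rd_idx too so the oldest
 * entry is dropped.
 */
static unsigned long *ring_claim(struct ring *r)
{
	unsigned long *entry = &r->slot[r->wr_idx];

	if (++r->wr_idx >= RING_ENTRIES)
		r->wr_idx = 0;			/* wrap the write index */
	if (r->wr_idx == r->rd_idx &&
	    ++r->rd_idx >= RING_ENTRIES)	/* overwrite oldest entry */
		r->rd_idx = 0;
	return entry;
}

int main(void)
{
	struct ring r = { 0 };

	for (unsigned long i = 0; i < 12; i++)	/* overfill to show wraparound */
		*ring_claim(&r) = i;
	printf("rd_idx=%d wr_idx=%d\n", r.rd_idx, r.wr_idx);
	return 0;
}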
82 | /* | ||
83 | * fnic_get_trace_data - Copy trace buffer to a memory file | ||
84 | * @fnic_dbgfs_t: pointer to debugfs trace buffer | ||
85 | * | ||
86 | * Description: | ||
87 | * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t | ||
88 | * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in | ||
89 | * the log and process the log until the end of the buffer. Then it will gather | ||
90 | * from the beginning of the log and process until the current entry @wr_idx. | ||
91 | * | ||
92 | * Return Value: | ||
93 | * This routine returns the amount of bytes that were dumped into fnic_dbgfs_t | ||
94 | */ | ||
95 | int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt) | ||
96 | { | ||
97 | int rd_idx; | ||
98 | int wr_idx; | ||
99 | int len = 0; | ||
100 | unsigned long flags; | ||
101 | char str[KSYM_SYMBOL_LEN]; | ||
102 | struct timespec val; | ||
103 | fnic_trace_data_t *tbp; | ||
104 | |||
105 | spin_lock_irqsave(&fnic_trace_lock, flags); | ||
106 | rd_idx = fnic_trace_entries.rd_idx; | ||
107 | wr_idx = fnic_trace_entries.wr_idx; | ||
108 | if (wr_idx < rd_idx) { | ||
109 | while (1) { | ||
110 | /* Start from read index @rd_idx */ | ||
111 | tbp = (fnic_trace_data_t *) | ||
112 | fnic_trace_entries.page_offset[rd_idx]; | ||
113 | if (!tbp) { | ||
114 | spin_unlock_irqrestore(&fnic_trace_lock, flags); | ||
115 | return 0; | ||
116 | } | ||
117 | /* Convert function pointer to function name */ | ||
118 | if (sizeof(unsigned long) < 8) { | ||
119 | sprint_symbol(str, tbp->fnaddr.low); | ||
120 | jiffies_to_timespec(tbp->timestamp.low, &val); | ||
121 | } else { | ||
122 | sprint_symbol(str, tbp->fnaddr.val); | ||
123 | jiffies_to_timespec(tbp->timestamp.val, &val); | ||
124 | } | ||
125 | /* | ||
126 | * Dump trace buffer entry to memory file | ||
127 | * and increment read index @rd_idx | ||
128 | */ | ||
129 | len += snprintf(fnic_dbgfs_prt->buffer + len, | ||
130 | (trace_max_pages * PAGE_SIZE * 3) - len, | ||
131 | "%16lu.%16lu %-50s %8x %8x %16llx %16llx " | ||
132 | "%16llx %16llx %16llx\n", val.tv_sec, | ||
133 | val.tv_nsec, str, tbp->host_no, tbp->tag, | ||
134 | tbp->data[0], tbp->data[1], tbp->data[2], | ||
135 | tbp->data[3], tbp->data[4]); | ||
136 | rd_idx++; | ||
137 | /* | ||
138 | * If rd_idx has reached the maximum number of | ||
139 | * trace entries, wrap it back to zero | ||
140 | */ | ||
141 | if (rd_idx > (fnic_max_trace_entries-1)) | ||
142 | rd_idx = 0; | ||
143 | /* | ||
144 | * Continue dumping trace buffer entries into the | ||
145 | * memory file until rd_idx reaches the write index | ||
146 | */ | ||
147 | if (rd_idx == wr_idx) | ||
148 | break; | ||
149 | } | ||
150 | } else if (wr_idx > rd_idx) { | ||
151 | while (1) { | ||
152 | /* Start from read index @rd_idx */ | ||
153 | tbp = (fnic_trace_data_t *) | ||
154 | fnic_trace_entries.page_offset[rd_idx]; | ||
155 | if (!tbp) { | ||
156 | spin_unlock_irqrestore(&fnic_trace_lock, flags); | ||
157 | return 0; | ||
158 | } | ||
159 | /* Convert function pointer to function name */ | ||
160 | if (sizeof(unsigned long) < 8) { | ||
161 | sprint_symbol(str, tbp->fnaddr.low); | ||
162 | jiffies_to_timespec(tbp->timestamp.low, &val); | ||
163 | } else { | ||
164 | sprint_symbol(str, tbp->fnaddr.val); | ||
165 | jiffies_to_timespec(tbp->timestamp.val, &val); | ||
166 | } | ||
167 | /* | ||
168 | * Dump trace buffer entry to memory file | ||
169 | * and increment read index @rd_idx | ||
170 | */ | ||
171 | len += snprintf(fnic_dbgfs_prt->buffer + len, | ||
172 | (trace_max_pages * PAGE_SIZE * 3) - len, | ||
173 | "%16lu.%16lu %-50s %8x %8x %16llx %16llx " | ||
174 | "%16llx %16llx %16llx\n", val.tv_sec, | ||
175 | val.tv_nsec, str, tbp->host_no, tbp->tag, | ||
176 | tbp->data[0], tbp->data[1], tbp->data[2], | ||
177 | tbp->data[3], tbp->data[4]); | ||
178 | rd_idx++; | ||
179 | /* | ||
180 | * Continue dumping trace buffer entries into the | ||
181 | * memory file until rd_idx reaches the write index | ||
182 | */ | ||
183 | if (rd_idx == wr_idx) | ||
184 | break; | ||
185 | } | ||
186 | } | ||
187 | spin_unlock_irqrestore(&fnic_trace_lock, flags); | ||
188 | return len; | ||
189 | } | ||
190 | |||
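fnic_get_trace_data() above handles the wrapped (wr_idx < rd_idx) and unwrapped (wr_idx > rd_idx) cases with two nearly identical loops; a single modular walk from rd_idx to wr_idx expresses the same traversal, which is one way the duplication could be avoided. A hedged sketch, with the debugfs formatting reduced to a printf:

#include <stdio.h>

#define RING_ENTRIES 8

/*
 * Walk a ring from rd_idx up to (but not including) wr_idx, wrapping at the
 * end of the array, and "dump" each entry. One modular loop covers both the
 * wr_idx > rd_idx and wr_idx < rd_idx cases; if the indices are equal,
 * nothing is dumped.
 */
static int dump_ring(const unsigned long *slot, int rd_idx, int wr_idx)
{
	int dumped = 0;

	for (int i = rd_idx; i != wr_idx; i = (i + 1) % RING_ENTRIES) {
		printf("entry[%d] = %lu\n", i, slot[i]);
		dumped++;
	}
	return dumped;
}

int main(void)
{
	unsigned long slot[RING_ENTRIES] = { 10, 11, 12, 13, 14, 15, 16, 17 };

	/* rd_idx past wr_idx: the walk wraps through the end of the array */
	return dump_ring(slot, 5, 3) == 6 ? 0 : 1;
}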
191 | /* | ||
192 | * fnic_trace_buf_init - Initialize fnic trace buffer logging facility | ||
193 | * | ||
194 | * Description: | ||
195 | * Initialize trace buffer data structure by allocating required memory and | ||
196 | * setting page_offset information for every trace entry by adding trace entry | ||
197 | * length to previous page_offset value. | ||
198 | */ | ||
199 | int fnic_trace_buf_init(void) | ||
200 | { | ||
201 | unsigned long fnic_buf_head; | ||
202 | int i; | ||
203 | int err = 0; | ||
204 | |||
205 | trace_max_pages = fnic_trace_max_pages; | ||
206 | fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/ | ||
207 | FNIC_ENTRY_SIZE_BYTES; | ||
208 | |||
209 | fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE)); | ||
210 | if (!fnic_trace_buf_p) { | ||
211 | printk(KERN_ERR PFX "Failed to allocate memory " | ||
212 | "for fnic_trace_buf_p\n"); | ||
213 | err = -ENOMEM; | ||
214 | goto err_fnic_trace_buf_init; | ||
215 | } | ||
216 | memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE)); | ||
217 | |||
218 | fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries * | ||
219 | sizeof(unsigned long)); | ||
220 | if (!fnic_trace_entries.page_offset) { | ||
221 | printk(KERN_ERR PFX "Failed to allocate memory for" | ||
222 | " page_offset\n"); | ||
223 | if (fnic_trace_buf_p) { | ||
224 | vfree((void *)fnic_trace_buf_p); | ||
225 | fnic_trace_buf_p = 0; | ||
226 | } | ||
227 | err = -ENOMEM; | ||
228 | goto err_fnic_trace_buf_init; | ||
229 | } | ||
230 | memset((void *)fnic_trace_entries.page_offset, 0, | ||
231 | (fnic_max_trace_entries * sizeof(unsigned long))); | ||
232 | fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0; | ||
233 | fnic_buf_head = fnic_trace_buf_p; | ||
234 | |||
235 | /* | ||
236 | * Set page_offset field of fnic_trace_entries struct by | ||
237 | * calculating memory location for every trace entry using | ||
238 | * length of each trace entry | ||
239 | */ | ||
240 | for (i = 0; i < fnic_max_trace_entries; i++) { | ||
241 | fnic_trace_entries.page_offset[i] = fnic_buf_head; | ||
242 | fnic_buf_head += FNIC_ENTRY_SIZE_BYTES; | ||
243 | } | ||
244 | err = fnic_trace_debugfs_init(); | ||
245 | if (err < 0) { | ||
246 | printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n"); | ||
247 | goto err_fnic_trace_debugfs_init; | ||
248 | } | ||
249 | printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n"); | ||
250 | return err; | ||
251 | err_fnic_trace_debugfs_init: | ||
252 | fnic_trace_free(); | ||
253 | err_fnic_trace_buf_init: | ||
254 | return err; | ||
255 | } | ||
256 | |||
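fnic_trace_buf_init() above allocates one contiguous buffer and then records the start address of every FNIC_ENTRY_SIZE_BYTES slice in page_offset[]. The same carve-up in plain user-space C, with invented names and calloc in place of vmalloc (the caller frees offsets[0], the backing buffer, and then the table itself):

#include <stdlib.h>

#define ENTRY_SIZE_BYTES 64

/*
 * Carve one allocation into fixed-size trace entries and record the address
 * of each entry, the way fnic_trace_buf_init() fills page_offset[].
 * Returns the offset table (entry 0 is also the backing buffer) or NULL.
 */
static unsigned char **build_offset_table(size_t total_bytes, size_t *nentries)
{
	size_t n = total_bytes / ENTRY_SIZE_BYTES;
	unsigned char *buf = calloc(1, total_bytes);
	unsigned char **offsets = buf ? calloc(n, sizeof(*offsets)) : NULL;

	if (!offsets) {			/* either allocation failed: undo both */
		free(buf);
		return NULL;
	}
	for (size_t i = 0; i < n; i++)
		offsets[i] = buf + i * ENTRY_SIZE_BYTES;
	*nentries = n;
	return offsets;
}

int main(void)
{
	size_t n = 0;
	unsigned char **offsets = build_offset_table(16 * 4096, &n);

	if (!offsets)
		return 1;
	free(offsets[0]);		/* backing buffer */
	free(offsets);
	return n == (16 * 4096) / ENTRY_SIZE_BYTES ? 0 : 1;
}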
257 | /* | ||
258 | * fnic_trace_free - Free memory of fnic trace data structures. | ||
259 | */ | ||
260 | void fnic_trace_free(void) | ||
261 | { | ||
262 | fnic_tracing_enabled = 0; | ||
263 | fnic_trace_debugfs_terminate(); | ||
264 | if (fnic_trace_entries.page_offset) { | ||
265 | vfree((void *)fnic_trace_entries.page_offset); | ||
266 | fnic_trace_entries.page_offset = NULL; | ||
267 | } | ||
268 | if (fnic_trace_buf_p) { | ||
269 | vfree((void *)fnic_trace_buf_p); | ||
270 | fnic_trace_buf_p = 0; | ||
271 | } | ||
272 | printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n"); | ||
273 | } | ||
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h new file mode 100644 index 000000000000..cef42b4c4d6c --- /dev/null +++ b/drivers/scsi/fnic/fnic_trace.h | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Cisco Systems, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you may redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; version 2 of the License. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
10 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
11 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
12 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
14 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
15 | * SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef __FNIC_TRACE_H__ | ||
19 | #define __FNIC_TRACE_H__ | ||
20 | |||
21 | #define FNIC_ENTRY_SIZE_BYTES 64 | ||
22 | |||
23 | extern ssize_t simple_read_from_buffer(void __user *to, | ||
24 | size_t count, | ||
25 | loff_t *ppos, | ||
26 | const void *from, | ||
27 | size_t available); | ||
28 | |||
29 | extern unsigned int fnic_trace_max_pages; | ||
30 | extern int fnic_tracing_enabled; | ||
31 | extern unsigned int trace_max_pages; | ||
32 | |||
33 | typedef struct fnic_trace_dbg { | ||
34 | int wr_idx; | ||
35 | int rd_idx; | ||
36 | unsigned long *page_offset; | ||
37 | } fnic_trace_dbg_t; | ||
38 | |||
39 | typedef struct fnic_dbgfs { | ||
40 | int buffer_len; | ||
41 | char *buffer; | ||
42 | } fnic_dbgfs_t; | ||
43 | |||
44 | struct fnic_trace_data { | ||
45 | union { | ||
46 | struct { | ||
47 | u32 low; | ||
48 | u32 high; | ||
49 | }; | ||
50 | u64 val; | ||
51 | } timestamp, fnaddr; | ||
52 | u32 host_no; | ||
53 | u32 tag; | ||
54 | u64 data[5]; | ||
55 | } __attribute__((__packed__)); | ||
56 | |||
57 | typedef struct fnic_trace_data fnic_trace_data_t; | ||
58 | |||
59 | #define FNIC_TRACE_ENTRY_SIZE \ | ||
60 | (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t)) | ||
61 | |||
62 | #define FNIC_TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e) \ | ||
63 | if (unlikely(fnic_tracing_enabled)) { \ | ||
64 | fnic_trace_data_t *trace_buf = fnic_trace_get_buf(); \ | ||
65 | if (trace_buf) { \ | ||
66 | if (sizeof(unsigned long) < 8) { \ | ||
67 | trace_buf->timestamp.low = jiffies; \ | ||
68 | trace_buf->fnaddr.low = (u32)(unsigned long)_fn; \ | ||
69 | } else { \ | ||
70 | trace_buf->timestamp.val = jiffies; \ | ||
71 | trace_buf->fnaddr.val = (u64)(unsigned long)_fn; \ | ||
72 | } \ | ||
73 | trace_buf->host_no = _hn; \ | ||
74 | trace_buf->tag = _t; \ | ||
75 | trace_buf->data[0] = (u64)(unsigned long)_a; \ | ||
76 | trace_buf->data[1] = (u64)(unsigned long)_b; \ | ||
77 | trace_buf->data[2] = (u64)(unsigned long)_c; \ | ||
78 | trace_buf->data[3] = (u64)(unsigned long)_d; \ | ||
79 | trace_buf->data[4] = (u64)(unsigned long)_e; \ | ||
80 | } \ | ||
81 | } | ||
82 | |||
83 | fnic_trace_data_t *fnic_trace_get_buf(void); | ||
84 | int fnic_get_trace_data(fnic_dbgfs_t *); | ||
85 | int fnic_trace_buf_init(void); | ||
86 | void fnic_trace_free(void); | ||
87 | int fnic_trace_debugfs_init(void); | ||
88 | void fnic_trace_debugfs_terminate(void); | ||
89 | |||
90 | #endif | ||
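One thing worth noting about FNIC_TRACE() above: it expands to a bare if statement, so placing a call directly inside an if/else without braces could bind the else unexpectedly. The usual guard for statement-like macros is a do { ... } while (0) wrapper; below is a hedged, much-simplified sketch of the same record-into-a-ring shape with that wrapper, where every type and helper is invented:

#include <stdint.h>
#include <time.h>

typedef struct {
	uint64_t timestamp;
	uint64_t fnaddr;
	uint32_t host_no;
	uint32_t tag;
	uint64_t data[5];
} trace_rec;

static int tracing_enabled = 1;
static trace_rec ring[64];
static unsigned int wr;

static trace_rec *claim_rec(void)
{
	return &ring[wr++ % 64];
}

/* Statement-like trace macro; do { } while (0) keeps it safe next to else. */
#define TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e)				\
	do {								\
		if (tracing_enabled) {					\
			trace_rec *r = claim_rec();			\
			r->timestamp = (uint64_t)time(NULL);		\
			r->fnaddr = (uint64_t)(uintptr_t)(_fn);		\
			r->host_no = (_hn);				\
			r->tag = (_t);					\
			r->data[0] = (_a); r->data[1] = (_b);		\
			r->data[2] = (_c); r->data[3] = (_d);		\
			r->data[4] = (_e);				\
		}							\
	} while (0)

int main(void)
{
	TRACE(main, 0, 7, 1, 2, 3, 4, 5);
	return ring[0].tag == 7 ? 0 : 1;
}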
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4f338061b5c3..7f4f790a3d71 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -165,7 +165,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c); | |||
165 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); | 165 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); |
166 | static struct CommandList *cmd_alloc(struct ctlr_info *h); | 166 | static struct CommandList *cmd_alloc(struct ctlr_info *h); |
167 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h); | 167 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h); |
168 | static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | 168 | static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
169 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, | 169 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, |
170 | int cmd_type); | 170 | int cmd_type); |
171 | 171 | ||
@@ -1131,7 +1131,7 @@ clean: | |||
1131 | return -ENOMEM; | 1131 | return -ENOMEM; |
1132 | } | 1132 | } |
1133 | 1133 | ||
1134 | static void hpsa_map_sg_chain_block(struct ctlr_info *h, | 1134 | static int hpsa_map_sg_chain_block(struct ctlr_info *h, |
1135 | struct CommandList *c) | 1135 | struct CommandList *c) |
1136 | { | 1136 | { |
1137 | struct SGDescriptor *chain_sg, *chain_block; | 1137 | struct SGDescriptor *chain_sg, *chain_block; |
@@ -1144,8 +1144,15 @@ static void hpsa_map_sg_chain_block(struct ctlr_info *h, | |||
1144 | (c->Header.SGTotal - h->max_cmd_sg_entries); | 1144 | (c->Header.SGTotal - h->max_cmd_sg_entries); |
1145 | temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, | 1145 | temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, |
1146 | PCI_DMA_TODEVICE); | 1146 | PCI_DMA_TODEVICE); |
1147 | if (dma_mapping_error(&h->pdev->dev, temp64)) { | ||
1148 | /* prevent subsequent unmapping */ | ||
1149 | chain_sg->Addr.lower = 0; | ||
1150 | chain_sg->Addr.upper = 0; | ||
1151 | return -1; | ||
1152 | } | ||
1147 | chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); | 1153 | chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); |
1148 | chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL); | 1154 | chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL); |
1155 | return 0; | ||
1149 | } | 1156 | } |
1150 | 1157 | ||
1151 | static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, | 1158 | static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, |
@@ -1390,7 +1397,7 @@ static void hpsa_pci_unmap(struct pci_dev *pdev, | |||
1390 | } | 1397 | } |
1391 | } | 1398 | } |
1392 | 1399 | ||
1393 | static void hpsa_map_one(struct pci_dev *pdev, | 1400 | static int hpsa_map_one(struct pci_dev *pdev, |
1394 | struct CommandList *cp, | 1401 | struct CommandList *cp, |
1395 | unsigned char *buf, | 1402 | unsigned char *buf, |
1396 | size_t buflen, | 1403 | size_t buflen, |
@@ -1401,10 +1408,16 @@ static void hpsa_map_one(struct pci_dev *pdev, | |||
1401 | if (buflen == 0 || data_direction == PCI_DMA_NONE) { | 1408 | if (buflen == 0 || data_direction == PCI_DMA_NONE) { |
1402 | cp->Header.SGList = 0; | 1409 | cp->Header.SGList = 0; |
1403 | cp->Header.SGTotal = 0; | 1410 | cp->Header.SGTotal = 0; |
1404 | return; | 1411 | return 0; |
1405 | } | 1412 | } |
1406 | 1413 | ||
1407 | addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); | 1414 | addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); |
1415 | if (dma_mapping_error(&pdev->dev, addr64)) { | ||
1416 | /* Prevent subsequent unmap of something never mapped */ | ||
1417 | cp->Header.SGList = 0; | ||
1418 | cp->Header.SGTotal = 0; | ||
1419 | return -1; | ||
1420 | } | ||
1408 | cp->SG[0].Addr.lower = | 1421 | cp->SG[0].Addr.lower = |
1409 | (u32) (addr64 & (u64) 0x00000000FFFFFFFF); | 1422 | (u32) (addr64 & (u64) 0x00000000FFFFFFFF); |
1410 | cp->SG[0].Addr.upper = | 1423 | cp->SG[0].Addr.upper = |
@@ -1412,6 +1425,7 @@ static void hpsa_map_one(struct pci_dev *pdev, | |||
1412 | cp->SG[0].Len = buflen; | 1425 | cp->SG[0].Len = buflen; |
1413 | cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ | 1426 | cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ |
1414 | cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ | 1427 | cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ |
1428 | return 0; | ||
1415 | } | 1429 | } |
1416 | 1430 | ||
1417 | static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, | 1431 | static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, |
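The hpsa hunks in this file share one pattern: after pci_map_single(), the new code checks dma_mapping_error() and, on failure, zeroes the descriptor it would otherwise have filled, so that a later generic unmap path sees nothing to undo. A user-space sketch of that check-then-neutralize idea; the map and error helpers here are invented stand-ins, not the kernel DMA API:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define BAD_ADDR ((uint64_t)0)

/* Invented stand-ins for the DMA mapping layer, for illustration only. */
static uint64_t fake_map(void *buf, size_t len)
{
	(void)len;
	return buf ? (uint64_t)(uintptr_t)buf : BAD_ADDR;	/* may "fail" */
}

static bool fake_mapping_error(uint64_t addr)
{
	return addr == BAD_ADDR;
}

struct sg_desc {
	uint64_t addr;
	uint32_t len;
};

/*
 * Map one buffer into a descriptor. On failure, zero the descriptor so a
 * later generic unmap pass sees "nothing mapped" and skips it, which is the
 * same idea as the dma_mapping_error() checks added above.
 */
static int map_one(struct sg_desc *sg, void *buf, size_t len)
{
	uint64_t addr = fake_map(buf, len);

	if (fake_mapping_error(addr)) {
		sg->addr = 0;	/* prevent a later unmap of an unmapped buffer */
		sg->len = 0;
		return -1;
	}
	sg->addr = addr;
	sg->len = (uint32_t)len;
	return 0;
}

int main(void)
{
	struct sg_desc sg;
	char buf[16];

	if (map_one(&sg, buf, sizeof(buf)))
		return 1;
	return map_one(&sg, NULL, 0) == -1 ? 0 : 1;	/* exercise failure path */
}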
@@ -1540,13 +1554,18 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, | |||
1540 | return -ENOMEM; | 1554 | return -ENOMEM; |
1541 | } | 1555 | } |
1542 | 1556 | ||
1543 | fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD); | 1557 | if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, |
1558 | page, scsi3addr, TYPE_CMD)) { | ||
1559 | rc = -1; | ||
1560 | goto out; | ||
1561 | } | ||
1544 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); | 1562 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); |
1545 | ei = c->err_info; | 1563 | ei = c->err_info; |
1546 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | 1564 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { |
1547 | hpsa_scsi_interpret_error(c); | 1565 | hpsa_scsi_interpret_error(c); |
1548 | rc = -1; | 1566 | rc = -1; |
1549 | } | 1567 | } |
1568 | out: | ||
1550 | cmd_special_free(h, c); | 1569 | cmd_special_free(h, c); |
1551 | return rc; | 1570 | return rc; |
1552 | } | 1571 | } |
@@ -1564,7 +1583,9 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr) | |||
1564 | return -ENOMEM; | 1583 | return -ENOMEM; |
1565 | } | 1584 | } |
1566 | 1585 | ||
1567 | fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); | 1586 | /* fill_cmd can't fail here, no data buffer to map. */ |
1587 | (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, | ||
1588 | NULL, 0, 0, scsi3addr, TYPE_MSG); | ||
1568 | hpsa_scsi_do_simple_cmd_core(h, c); | 1589 | hpsa_scsi_do_simple_cmd_core(h, c); |
1569 | /* no unmap needed here because no data xfer. */ | 1590 | /* no unmap needed here because no data xfer. */ |
1570 | 1591 | ||
@@ -1631,8 +1652,11 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |||
1631 | } | 1652 | } |
1632 | /* address the controller */ | 1653 | /* address the controller */ |
1633 | memset(scsi3addr, 0, sizeof(scsi3addr)); | 1654 | memset(scsi3addr, 0, sizeof(scsi3addr)); |
1634 | fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, | 1655 | if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, |
1635 | buf, bufsize, 0, scsi3addr, TYPE_CMD); | 1656 | buf, bufsize, 0, scsi3addr, TYPE_CMD)) { |
1657 | rc = -1; | ||
1658 | goto out; | ||
1659 | } | ||
1636 | if (extended_response) | 1660 | if (extended_response) |
1637 | c->Request.CDB[1] = extended_response; | 1661 | c->Request.CDB[1] = extended_response; |
1638 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); | 1662 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); |
@@ -1642,6 +1666,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |||
1642 | hpsa_scsi_interpret_error(c); | 1666 | hpsa_scsi_interpret_error(c); |
1643 | rc = -1; | 1667 | rc = -1; |
1644 | } | 1668 | } |
1669 | out: | ||
1645 | cmd_special_free(h, c); | 1670 | cmd_special_free(h, c); |
1646 | return rc; | 1671 | return rc; |
1647 | } | 1672 | } |
@@ -2105,7 +2130,10 @@ static int hpsa_scatter_gather(struct ctlr_info *h, | |||
2105 | if (chained) { | 2130 | if (chained) { |
2106 | cp->Header.SGList = h->max_cmd_sg_entries; | 2131 | cp->Header.SGList = h->max_cmd_sg_entries; |
2107 | cp->Header.SGTotal = (u16) (use_sg + 1); | 2132 | cp->Header.SGTotal = (u16) (use_sg + 1); |
2108 | hpsa_map_sg_chain_block(h, cp); | 2133 | if (hpsa_map_sg_chain_block(h, cp)) { |
2134 | scsi_dma_unmap(cmd); | ||
2135 | return -1; | ||
2136 | } | ||
2109 | return 0; | 2137 | return 0; |
2110 | } | 2138 | } |
2111 | 2139 | ||
@@ -2353,8 +2381,9 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h, | |||
2353 | if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) | 2381 | if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) |
2354 | waittime = waittime * 2; | 2382 | waittime = waittime * 2; |
2355 | 2383 | ||
2356 | /* Send the Test Unit Ready */ | 2384 | /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ |
2357 | fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); | 2385 | (void) fill_cmd(c, TEST_UNIT_READY, h, |
2386 | NULL, 0, 0, lunaddr, TYPE_CMD); | ||
2358 | hpsa_scsi_do_simple_cmd_core(h, c); | 2387 | hpsa_scsi_do_simple_cmd_core(h, c); |
2359 | /* no unmap needed here because no data xfer. */ | 2388 | /* no unmap needed here because no data xfer. */ |
2360 | 2389 | ||
@@ -2439,7 +2468,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2439 | return -ENOMEM; | 2468 | return -ENOMEM; |
2440 | } | 2469 | } |
2441 | 2470 | ||
2442 | fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG); | 2471 | /* fill_cmd can't fail here, no buffer to map */ |
2472 | (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, | ||
2473 | 0, 0, scsi3addr, TYPE_MSG); | ||
2443 | if (swizzle) | 2474 | if (swizzle) |
2444 | swizzle_abort_tag(&c->Request.CDB[4]); | 2475 | swizzle_abort_tag(&c->Request.CDB[4]); |
2445 | hpsa_scsi_do_simple_cmd_core(h, c); | 2476 | hpsa_scsi_do_simple_cmd_core(h, c); |
@@ -2928,6 +2959,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
2928 | struct CommandList *c; | 2959 | struct CommandList *c; |
2929 | char *buff = NULL; | 2960 | char *buff = NULL; |
2930 | union u64bit temp64; | 2961 | union u64bit temp64; |
2962 | int rc = 0; | ||
2931 | 2963 | ||
2932 | if (!argp) | 2964 | if (!argp) |
2933 | return -EINVAL; | 2965 | return -EINVAL; |
@@ -2947,8 +2979,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
2947 | /* Copy the data into the buffer we created */ | 2979 | /* Copy the data into the buffer we created */ |
2948 | if (copy_from_user(buff, iocommand.buf, | 2980 | if (copy_from_user(buff, iocommand.buf, |
2949 | iocommand.buf_size)) { | 2981 | iocommand.buf_size)) { |
2950 | kfree(buff); | 2982 | rc = -EFAULT; |
2951 | return -EFAULT; | 2983 | goto out_kfree; |
2952 | } | 2984 | } |
2953 | } else { | 2985 | } else { |
2954 | memset(buff, 0, iocommand.buf_size); | 2986 | memset(buff, 0, iocommand.buf_size); |
@@ -2956,8 +2988,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
2956 | } | 2988 | } |
2957 | c = cmd_special_alloc(h); | 2989 | c = cmd_special_alloc(h); |
2958 | if (c == NULL) { | 2990 | if (c == NULL) { |
2959 | kfree(buff); | 2991 | rc = -ENOMEM; |
2960 | return -ENOMEM; | 2992 | goto out_kfree; |
2961 | } | 2993 | } |
2962 | /* Fill in the command type */ | 2994 | /* Fill in the command type */ |
2963 | c->cmd_type = CMD_IOCTL_PEND; | 2995 | c->cmd_type = CMD_IOCTL_PEND; |
@@ -2982,6 +3014,13 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
2982 | if (iocommand.buf_size > 0) { | 3014 | if (iocommand.buf_size > 0) { |
2983 | temp64.val = pci_map_single(h->pdev, buff, | 3015 | temp64.val = pci_map_single(h->pdev, buff, |
2984 | iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); | 3016 | iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); |
3017 | if (dma_mapping_error(&h->pdev->dev, temp64.val)) { | ||
3018 | c->SG[0].Addr.lower = 0; | ||
3019 | c->SG[0].Addr.upper = 0; | ||
3020 | c->SG[0].Len = 0; | ||
3021 | rc = -ENOMEM; | ||
3022 | goto out; | ||
3023 | } | ||
2985 | c->SG[0].Addr.lower = temp64.val32.lower; | 3024 | c->SG[0].Addr.lower = temp64.val32.lower; |
2986 | c->SG[0].Addr.upper = temp64.val32.upper; | 3025 | c->SG[0].Addr.upper = temp64.val32.upper; |
2987 | c->SG[0].Len = iocommand.buf_size; | 3026 | c->SG[0].Len = iocommand.buf_size; |
@@ -2996,22 +3035,22 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
2996 | memcpy(&iocommand.error_info, c->err_info, | 3035 | memcpy(&iocommand.error_info, c->err_info, |
2997 | sizeof(iocommand.error_info)); | 3036 | sizeof(iocommand.error_info)); |
2998 | if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { | 3037 | if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { |
2999 | kfree(buff); | 3038 | rc = -EFAULT; |
3000 | cmd_special_free(h, c); | 3039 | goto out; |
3001 | return -EFAULT; | ||
3002 | } | 3040 | } |
3003 | if (iocommand.Request.Type.Direction == XFER_READ && | 3041 | if (iocommand.Request.Type.Direction == XFER_READ && |
3004 | iocommand.buf_size > 0) { | 3042 | iocommand.buf_size > 0) { |
3005 | /* Copy the data out of the buffer we created */ | 3043 | /* Copy the data out of the buffer we created */ |
3006 | if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { | 3044 | if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { |
3007 | kfree(buff); | 3045 | rc = -EFAULT; |
3008 | cmd_special_free(h, c); | 3046 | goto out; |
3009 | return -EFAULT; | ||
3010 | } | 3047 | } |
3011 | } | 3048 | } |
3012 | kfree(buff); | 3049 | out: |
3013 | cmd_special_free(h, c); | 3050 | cmd_special_free(h, c); |
3014 | return 0; | 3051 | out_kfree: |
3052 | kfree(buff); | ||
3053 | return rc; | ||
3015 | } | 3054 | } |
3016 | 3055 | ||
3017 | static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | 3056 | static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) |
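The hpsa_passthru_ioctl() hunk above replaces the repeated kfree()/cmd_special_free()/return sequences with a single exit path reached through out: and out_kfree: labels, releasing resources in the reverse order of acquisition. A small sketch of that centralized-cleanup idiom with invented resources and helpers:

#include <stdlib.h>

struct cmd { int dummy; };

/* Invented helpers standing in for cmd_special_alloc()/cmd_special_free(). */
static struct cmd *cmd_alloc(void)
{
	return calloc(1, sizeof(struct cmd));
}

static void cmd_free(struct cmd *c)
{
	free(c);
}

/*
 * Acquire resources in order and release them in reverse through labelled
 * exit points, so every error path funnels through the same cleanup code.
 */
static int do_ioctl(size_t bufsize)
{
	int rc = 0;
	char *buff;
	struct cmd *c;

	buff = malloc(bufsize ? bufsize : 1);
	if (!buff)
		return -1;		/* nothing to undo yet */

	c = cmd_alloc();
	if (!c) {
		rc = -1;
		goto out_kfree;		/* only the buffer needs freeing */
	}

	if (bufsize == 0) {		/* stand-in for a failing later step */
		rc = -1;
		goto out;
	}

	/* ... issue the command; later errors just set rc and goto out ... */

out:
	cmd_free(c);
out_kfree:
	free(buff);
	return rc;
}

int main(void)
{
	return (do_ioctl(16) == 0 && do_ioctl(0) == -1) ? 0 : 1;
}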
@@ -3103,6 +3142,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
3103 | for (i = 0; i < sg_used; i++) { | 3142 | for (i = 0; i < sg_used; i++) { |
3104 | temp64.val = pci_map_single(h->pdev, buff[i], | 3143 | temp64.val = pci_map_single(h->pdev, buff[i], |
3105 | buff_size[i], PCI_DMA_BIDIRECTIONAL); | 3144 | buff_size[i], PCI_DMA_BIDIRECTIONAL); |
3145 | if (dma_mapping_error(&h->pdev->dev, temp64.val)) { | ||
3146 | c->SG[i].Addr.lower = 0; | ||
3147 | c->SG[i].Addr.upper = 0; | ||
3148 | c->SG[i].Len = 0; | ||
3149 | hpsa_pci_unmap(h->pdev, c, i, | ||
3150 | PCI_DMA_BIDIRECTIONAL); | ||
3151 | status = -ENOMEM; | ||
3152 | goto cleanup1; | ||
3153 | } | ||
3106 | c->SG[i].Addr.lower = temp64.val32.lower; | 3154 | c->SG[i].Addr.lower = temp64.val32.lower; |
3107 | c->SG[i].Addr.upper = temp64.val32.upper; | 3155 | c->SG[i].Addr.upper = temp64.val32.upper; |
3108 | c->SG[i].Len = buff_size[i]; | 3156 | c->SG[i].Len = buff_size[i]; |
@@ -3190,7 +3238,8 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, | |||
3190 | c = cmd_alloc(h); | 3238 | c = cmd_alloc(h); |
3191 | if (!c) | 3239 | if (!c) |
3192 | return -ENOMEM; | 3240 | return -ENOMEM; |
3193 | fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, | 3241 | /* fill_cmd can't fail here, no data buffer to map */ |
3242 | (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, | ||
3194 | RAID_CTLR_LUNID, TYPE_MSG); | 3243 | RAID_CTLR_LUNID, TYPE_MSG); |
3195 | c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ | 3244 | c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ |
3196 | c->waiting = NULL; | 3245 | c->waiting = NULL; |
@@ -3202,7 +3251,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, | |||
3202 | return 0; | 3251 | return 0; |
3203 | } | 3252 | } |
3204 | 3253 | ||
3205 | static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | 3254 | static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
3206 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, | 3255 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, |
3207 | int cmd_type) | 3256 | int cmd_type) |
3208 | { | 3257 | { |
@@ -3271,7 +3320,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
3271 | default: | 3320 | default: |
3272 | dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); | 3321 | dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); |
3273 | BUG(); | 3322 | BUG(); |
3274 | return; | 3323 | return -1; |
3275 | } | 3324 | } |
3276 | } else if (cmd_type == TYPE_MSG) { | 3325 | } else if (cmd_type == TYPE_MSG) { |
3277 | switch (cmd) { | 3326 | switch (cmd) { |
@@ -3343,10 +3392,9 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
3343 | default: | 3392 | default: |
3344 | pci_dir = PCI_DMA_BIDIRECTIONAL; | 3393 | pci_dir = PCI_DMA_BIDIRECTIONAL; |
3345 | } | 3394 | } |
3346 | 3395 | if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) | |
3347 | hpsa_map_one(h->pdev, c, buff, size, pci_dir); | 3396 | return -1; |
3348 | 3397 | return 0; | |
3349 | return; | ||
3350 | } | 3398 | } |
3351 | 3399 | ||
3352 | /* | 3400 | /* |
@@ -4882,10 +4930,13 @@ static void hpsa_flush_cache(struct ctlr_info *h) | |||
4882 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 4930 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); |
4883 | goto out_of_memory; | 4931 | goto out_of_memory; |
4884 | } | 4932 | } |
4885 | fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, | 4933 | if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, |
4886 | RAID_CTLR_LUNID, TYPE_CMD); | 4934 | RAID_CTLR_LUNID, TYPE_CMD)) { |
4935 | goto out; | ||
4936 | } | ||
4887 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); | 4937 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); |
4888 | if (c->err_info->CommandStatus != 0) | 4938 | if (c->err_info->CommandStatus != 0) |
4939 | out: | ||
4889 | dev_warn(&h->pdev->dev, | 4940 | dev_warn(&h->pdev->dev, |
4890 | "error flushing cache on controller\n"); | 4941 | "error flushing cache on controller\n"); |
4891 | cmd_special_free(h, c); | 4942 | cmd_special_free(h, c); |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 8fa79b83f2d3..f328089a1060 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -6112,7 +6112,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost, | |||
6112 | * We have told the host to stop giving us new requests, but | 6112 | * We have told the host to stop giving us new requests, but |
6113 | * ERP ops don't count. FIXME | 6113 | * ERP ops don't count. FIXME |
6114 | */ | 6114 | */ |
6115 | if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) { | 6115 | if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { |
6116 | spin_unlock_irqrestore(hrrq->lock, hrrq_flags); | 6116 | spin_unlock_irqrestore(hrrq->lock, hrrq_flags); |
6117 | return SCSI_MLQUEUE_HOST_BUSY; | 6117 | return SCSI_MLQUEUE_HOST_BUSY; |
6118 | } | 6118 | } |
@@ -6121,7 +6121,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost, | |||
6121 | * FIXME - Create scsi_set_host_offline interface | 6121 | * FIXME - Create scsi_set_host_offline interface |
6122 | * and the ioa_is_dead check can be removed | 6122 | * and the ioa_is_dead check can be removed |
6123 | */ | 6123 | */ |
6124 | if (unlikely(hrrq->ioa_is_dead || !res)) { | 6124 | if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { |
6125 | spin_unlock_irqrestore(hrrq->lock, hrrq_flags); | 6125 | spin_unlock_irqrestore(hrrq->lock, hrrq_flags); |
6126 | goto err_nodev; | 6126 | goto err_nodev; |
6127 | } | 6127 | } |
@@ -6741,14 +6741,17 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) | |||
6741 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 6741 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
6742 | 6742 | ||
6743 | ENTER; | 6743 | ENTER; |
6744 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { | ||
6745 | ipr_trace; | ||
6746 | spin_unlock_irq(ioa_cfg->host->host_lock); | ||
6747 | scsi_unblock_requests(ioa_cfg->host); | ||
6748 | spin_lock_irq(ioa_cfg->host->host_lock); | ||
6749 | } | ||
6750 | |||
6744 | ioa_cfg->in_reset_reload = 0; | 6751 | ioa_cfg->in_reset_reload = 0; |
6745 | ioa_cfg->reset_retries = 0; | 6752 | ioa_cfg->reset_retries = 0; |
6746 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); | 6753 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); |
6747 | wake_up_all(&ioa_cfg->reset_wait_q); | 6754 | wake_up_all(&ioa_cfg->reset_wait_q); |
6748 | |||
6749 | spin_unlock_irq(ioa_cfg->host->host_lock); | ||
6750 | scsi_unblock_requests(ioa_cfg->host); | ||
6751 | spin_lock_irq(ioa_cfg->host->host_lock); | ||
6752 | LEAVE; | 6755 | LEAVE; |
6753 | 6756 | ||
6754 | return IPR_RC_JOB_RETURN; | 6757 | return IPR_RC_JOB_RETURN; |
@@ -8494,7 +8497,8 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, | |||
8494 | spin_unlock(&ioa_cfg->hrrq[i]._lock); | 8497 | spin_unlock(&ioa_cfg->hrrq[i]._lock); |
8495 | } | 8498 | } |
8496 | wmb(); | 8499 | wmb(); |
8497 | scsi_block_requests(ioa_cfg->host); | 8500 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) |
8501 | scsi_block_requests(ioa_cfg->host); | ||
8498 | 8502 | ||
8499 | ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); | 8503 | ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); |
8500 | ioa_cfg->reset_cmd = ipr_cmd; | 8504 | ioa_cfg->reset_cmd = ipr_cmd; |
@@ -8549,9 +8553,11 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, | |||
8549 | ipr_fail_all_ops(ioa_cfg); | 8553 | ipr_fail_all_ops(ioa_cfg); |
8550 | wake_up_all(&ioa_cfg->reset_wait_q); | 8554 | wake_up_all(&ioa_cfg->reset_wait_q); |
8551 | 8555 | ||
8552 | spin_unlock_irq(ioa_cfg->host->host_lock); | 8556 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { |
8553 | scsi_unblock_requests(ioa_cfg->host); | 8557 | spin_unlock_irq(ioa_cfg->host->host_lock); |
8554 | spin_lock_irq(ioa_cfg->host->host_lock); | 8558 | scsi_unblock_requests(ioa_cfg->host); |
8559 | spin_lock_irq(ioa_cfg->host->host_lock); | ||
8560 | } | ||
8555 | return; | 8561 | return; |
8556 | } else { | 8562 | } else { |
8557 | ioa_cfg->in_ioa_bringdown = 1; | 8563 | ioa_cfg->in_ioa_bringdown = 1; |
@@ -9695,6 +9701,7 @@ static void __ipr_remove(struct pci_dev *pdev) | |||
9695 | { | 9701 | { |
9696 | unsigned long host_lock_flags = 0; | 9702 | unsigned long host_lock_flags = 0; |
9697 | struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); | 9703 | struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); |
9704 | int i; | ||
9698 | ENTER; | 9705 | ENTER; |
9699 | 9706 | ||
9700 | spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); | 9707 | spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); |
@@ -9704,6 +9711,12 @@ static void __ipr_remove(struct pci_dev *pdev) | |||
9704 | spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); | 9711 | spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); |
9705 | } | 9712 | } |
9706 | 9713 | ||
9714 | for (i = 0; i < ioa_cfg->hrrq_num; i++) { | ||
9715 | spin_lock(&ioa_cfg->hrrq[i]._lock); | ||
9716 | ioa_cfg->hrrq[i].removing_ioa = 1; | ||
9717 | spin_unlock(&ioa_cfg->hrrq[i]._lock); | ||
9718 | } | ||
9719 | wmb(); | ||
9707 | ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); | 9720 | ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); |
9708 | 9721 | ||
9709 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); | 9722 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 1a9a246932ae..21a6ff1ed5c6 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
@@ -493,6 +493,7 @@ struct ipr_hrr_queue { | |||
493 | u8 allow_interrupts:1; | 493 | u8 allow_interrupts:1; |
494 | u8 ioa_is_dead:1; | 494 | u8 ioa_is_dead:1; |
495 | u8 allow_cmds:1; | 495 | u8 allow_cmds:1; |
496 | u8 removing_ioa:1; | ||
496 | 497 | ||
497 | struct blk_iopoll iopoll; | 498 | struct blk_iopoll iopoll; |
498 | }; | 499 | }; |
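The ipr changes above introduce a per-queue removing_ioa bit: __ipr_remove() sets it under each queue's lock before starting the bringdown, and the queuecommand and reset paths check it so that no new commands are accepted and the SCSI host is not unblocked while the adapter is being removed. A hedged pthread sketch of that quiesce-by-flag pattern, with all names invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NQUEUES 4

/* Invented miniature of the per-queue flag the ipr hunks add. */
struct hrr_queue {
	pthread_mutex_t lock;
	bool removing;
};

static struct hrr_queue queues[NQUEUES];

/*
 * Teardown path: mark every queue as "removing" under its own lock so that
 * submitters observe the flag before any further work is accepted.
 */
static void mark_removing(void)
{
	for (int i = 0; i < NQUEUES; i++) {
		pthread_mutex_lock(&queues[i].lock);
		queues[i].removing = true;
		pthread_mutex_unlock(&queues[i].lock);
	}
}

/*
 * Submission path: refuse new commands once removal has started, the same
 * check ipr_queuecommand() gains via !hrrq->removing_ioa.
 */
static int submit(int qi)
{
	int rc;

	pthread_mutex_lock(&queues[qi].lock);
	rc = queues[qi].removing ? -1 : 0;	/* -1: refuse, host is going away */
	pthread_mutex_unlock(&queues[qi].lock);
	return rc;
}

int main(void)
{
	for (int i = 0; i < NQUEUES; i++)
		pthread_mutex_init(&queues[i].lock, NULL);

	printf("before removal: %d\n", submit(0));	/* accepted: 0 */
	mark_removing();
	printf("after removal: %d\n", submit(0));	/* refused: -1 */
	return 0;
}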
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index fcb9d0b20ee4..09c81b2f2169 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -1381,10 +1381,10 @@ static void fc_fcp_timeout(unsigned long data) | |||
1381 | 1381 | ||
1382 | fsp->state |= FC_SRB_FCP_PROCESSING_TMO; | 1382 | fsp->state |= FC_SRB_FCP_PROCESSING_TMO; |
1383 | 1383 | ||
1384 | if (fsp->state & FC_SRB_RCV_STATUS) | 1384 | if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) |
1385 | fc_fcp_complete_locked(fsp); | ||
1386 | else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) | ||
1387 | fc_fcp_rec(fsp); | 1385 | fc_fcp_rec(fsp); |
1386 | else if (fsp->state & FC_SRB_RCV_STATUS) | ||
1387 | fc_fcp_complete_locked(fsp); | ||
1388 | else | 1388 | else |
1389 | fc_fcp_recovery(fsp, FC_TIMED_OUT); | 1389 | fc_fcp_recovery(fsp, FC_TIMED_OUT); |
1390 | fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; | 1390 | fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; |
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h index c2830cc66d6a..b74189d89322 100644 --- a/drivers/scsi/libfc/fc_libfc.h +++ b/drivers/scsi/libfc/fc_libfc.h | |||
@@ -41,25 +41,25 @@ extern unsigned int fc_debug_logging; | |||
41 | 41 | ||
42 | #define FC_LIBFC_DBG(fmt, args...) \ | 42 | #define FC_LIBFC_DBG(fmt, args...) \ |
43 | FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \ | 43 | FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \ |
44 | printk(KERN_INFO "libfc: " fmt, ##args)) | 44 | pr_info("libfc: " fmt, ##args)) |
45 | 45 | ||
46 | #define FC_LPORT_DBG(lport, fmt, args...) \ | 46 | #define FC_LPORT_DBG(lport, fmt, args...) \ |
47 | FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ | 47 | FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ |
48 | printk(KERN_INFO "host%u: lport %6.6x: " fmt, \ | 48 | pr_info("host%u: lport %6.6x: " fmt, \ |
49 | (lport)->host->host_no, \ | 49 | (lport)->host->host_no, \ |
50 | (lport)->port_id, ##args)) | 50 | (lport)->port_id, ##args)) |
51 | 51 | ||
52 | #define FC_DISC_DBG(disc, fmt, args...) \ | 52 | #define FC_DISC_DBG(disc, fmt, args...) \ |
53 | FC_CHECK_LOGGING(FC_DISC_LOGGING, \ | 53 | FC_CHECK_LOGGING(FC_DISC_LOGGING, \ |
54 | printk(KERN_INFO "host%u: disc: " fmt, \ | 54 | pr_info("host%u: disc: " fmt, \ |
55 | fc_disc_lport(disc)->host->host_no, \ | 55 | fc_disc_lport(disc)->host->host_no, \ |
56 | ##args)) | 56 | ##args)) |
57 | 57 | ||
58 | #define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ | 58 | #define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ |
59 | FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ | 59 | FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ |
60 | printk(KERN_INFO "host%u: rport %6.6x: " fmt, \ | 60 | pr_info("host%u: rport %6.6x: " fmt, \ |
61 | (lport)->host->host_no, \ | 61 | (lport)->host->host_no, \ |
62 | (port_id), ##args)) | 62 | (port_id), ##args)) |
63 | 63 | ||
64 | #define FC_RPORT_DBG(rdata, fmt, args...) \ | 64 | #define FC_RPORT_DBG(rdata, fmt, args...) \ |
65 | FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args) | 65 | FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args) |
@@ -70,13 +70,13 @@ extern unsigned int fc_debug_logging; | |||
70 | if ((pkt)->seq_ptr) { \ | 70 | if ((pkt)->seq_ptr) { \ |
71 | struct fc_exch *_ep = NULL; \ | 71 | struct fc_exch *_ep = NULL; \ |
72 | _ep = fc_seq_exch((pkt)->seq_ptr); \ | 72 | _ep = fc_seq_exch((pkt)->seq_ptr); \ |
73 | printk(KERN_INFO "host%u: fcp: %6.6x: " \ | 73 | pr_info("host%u: fcp: %6.6x: " \ |
74 | "xid %04x-%04x: " fmt, \ | 74 | "xid %04x-%04x: " fmt, \ |
75 | (pkt)->lp->host->host_no, \ | 75 | (pkt)->lp->host->host_no, \ |
76 | (pkt)->rport->port_id, \ | 76 | (pkt)->rport->port_id, \ |
77 | (_ep)->oxid, (_ep)->rxid, ##args); \ | 77 | (_ep)->oxid, (_ep)->rxid, ##args); \ |
78 | } else { \ | 78 | } else { \ |
79 | printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \ | 79 | pr_info("host%u: fcp: %6.6x: " fmt, \ |
80 | (pkt)->lp->host->host_no, \ | 80 | (pkt)->lp->host->host_no, \ |
81 | (pkt)->rport->port_id, ##args); \ | 81 | (pkt)->rport->port_id, ##args); \ |
82 | } \ | 82 | } \ |
@@ -84,14 +84,14 @@ extern unsigned int fc_debug_logging; | |||
84 | 84 | ||
85 | #define FC_EXCH_DBG(exch, fmt, args...) \ | 85 | #define FC_EXCH_DBG(exch, fmt, args...) \ |
86 | FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ | 86 | FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ |
87 | printk(KERN_INFO "host%u: xid %4x: " fmt, \ | 87 | pr_info("host%u: xid %4x: " fmt, \ |
88 | (exch)->lp->host->host_no, \ | 88 | (exch)->lp->host->host_no, \ |
89 | exch->xid, ##args)) | 89 | exch->xid, ##args)) |
90 | 90 | ||
91 | #define FC_SCSI_DBG(lport, fmt, args...) \ | 91 | #define FC_SCSI_DBG(lport, fmt, args...) \ |
92 | FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ | 92 | FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ |
93 | printk(KERN_INFO "host%u: scsi: " fmt, \ | 93 | pr_info("host%u: scsi: " fmt, \ |
94 | (lport)->host->host_no, ##args)) | 94 | (lport)->host->host_no, ##args)) |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * FC-4 Providers. | 97 | * FC-4 Providers. |
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 83aa1efec875..d518d17e940f 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -582,7 +582,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) | |||
582 | static void fc_rport_error_retry(struct fc_rport_priv *rdata, | 582 | static void fc_rport_error_retry(struct fc_rport_priv *rdata, |
583 | struct fc_frame *fp) | 583 | struct fc_frame *fp) |
584 | { | 584 | { |
585 | unsigned long delay = FC_DEF_E_D_TOV; | 585 | unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV); |
586 | 586 | ||
587 | /* make sure this isn't an FC_EX_CLOSED error, never retry those */ | 587 | /* make sure this isn't an FC_EX_CLOSED error, never retry those */ |
588 | if (PTR_ERR(fp) == -FC_EX_CLOSED) | 588 | if (PTR_ERR(fp) == -FC_EX_CLOSED) |
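The fc_rport change above converts the retry delay from a raw millisecond constant to msecs_to_jiffies(), since the value is used as a jiffies-based delay for the delayed retry work. A tiny user-space illustration of why the unit conversion matters; the HZ value and the rounding below are assumptions made for the example, not the kernel's exact implementation:

#include <stdio.h>

#define HZ 250	/* assumed tick rate for this example */

/*
 * Convert a millisecond interval to timer ticks, rounding up so that a
 * non-zero delay never becomes zero ticks.
 */
static unsigned long msecs_to_ticks(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	unsigned int delay_ms = 2000;	/* a 2 second timeout expressed in ms */

	/*
	 * Passing 2000 directly as a tick count would mean 2000 / HZ = 8
	 * seconds at HZ=250; converting first gives the intended 2 seconds.
	 */
	printf("%u ms -> %lu ticks\n", delay_ms, msecs_to_ticks(delay_ms));
	return 0;
}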
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 55b6fc83ad71..74b67d98e952 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -6644,7 +6644,7 @@ static int | |||
6644 | lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | 6644 | lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, |
6645 | uint32_t flag) | 6645 | uint32_t flag) |
6646 | { | 6646 | { |
6647 | MAILBOX_t *mb; | 6647 | MAILBOX_t *mbx; |
6648 | struct lpfc_sli *psli = &phba->sli; | 6648 | struct lpfc_sli *psli = &phba->sli; |
6649 | uint32_t status, evtctr; | 6649 | uint32_t status, evtctr; |
6650 | uint32_t ha_copy, hc_copy; | 6650 | uint32_t ha_copy, hc_copy; |
@@ -6698,7 +6698,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6698 | 6698 | ||
6699 | psli = &phba->sli; | 6699 | psli = &phba->sli; |
6700 | 6700 | ||
6701 | mb = &pmbox->u.mb; | 6701 | mbx = &pmbox->u.mb; |
6702 | status = MBX_SUCCESS; | 6702 | status = MBX_SUCCESS; |
6703 | 6703 | ||
6704 | if (phba->link_state == LPFC_HBA_ERROR) { | 6704 | if (phba->link_state == LPFC_HBA_ERROR) { |
@@ -6713,7 +6713,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6713 | goto out_not_finished; | 6713 | goto out_not_finished; |
6714 | } | 6714 | } |
6715 | 6715 | ||
6716 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { | 6716 | if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { |
6717 | if (lpfc_readl(phba->HCregaddr, &hc_copy) || | 6717 | if (lpfc_readl(phba->HCregaddr, &hc_copy) || |
6718 | !(hc_copy & HC_MBINT_ENA)) { | 6718 | !(hc_copy & HC_MBINT_ENA)) { |
6719 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 6719 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
@@ -6767,7 +6767,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6767 | "(%d):0308 Mbox cmd issue - BUSY Data: " | 6767 | "(%d):0308 Mbox cmd issue - BUSY Data: " |
6768 | "x%x x%x x%x x%x\n", | 6768 | "x%x x%x x%x x%x\n", |
6769 | pmbox->vport ? pmbox->vport->vpi : 0xffffff, | 6769 | pmbox->vport ? pmbox->vport->vpi : 0xffffff, |
6770 | mb->mbxCommand, phba->pport->port_state, | 6770 | mbx->mbxCommand, phba->pport->port_state, |
6771 | psli->sli_flag, flag); | 6771 | psli->sli_flag, flag); |
6772 | 6772 | ||
6773 | psli->slistat.mbox_busy++; | 6773 | psli->slistat.mbox_busy++; |
@@ -6777,15 +6777,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6777 | lpfc_debugfs_disc_trc(pmbox->vport, | 6777 | lpfc_debugfs_disc_trc(pmbox->vport, |
6778 | LPFC_DISC_TRC_MBOX_VPORT, | 6778 | LPFC_DISC_TRC_MBOX_VPORT, |
6779 | "MBOX Bsy vport: cmd:x%x mb:x%x x%x", | 6779 | "MBOX Bsy vport: cmd:x%x mb:x%x x%x", |
6780 | (uint32_t)mb->mbxCommand, | 6780 | (uint32_t)mbx->mbxCommand, |
6781 | mb->un.varWords[0], mb->un.varWords[1]); | 6781 | mbx->un.varWords[0], mbx->un.varWords[1]); |
6782 | } | 6782 | } |
6783 | else { | 6783 | else { |
6784 | lpfc_debugfs_disc_trc(phba->pport, | 6784 | lpfc_debugfs_disc_trc(phba->pport, |
6785 | LPFC_DISC_TRC_MBOX, | 6785 | LPFC_DISC_TRC_MBOX, |
6786 | "MBOX Bsy: cmd:x%x mb:x%x x%x", | 6786 | "MBOX Bsy: cmd:x%x mb:x%x x%x", |
6787 | (uint32_t)mb->mbxCommand, | 6787 | (uint32_t)mbx->mbxCommand, |
6788 | mb->un.varWords[0], mb->un.varWords[1]); | 6788 | mbx->un.varWords[0], mbx->un.varWords[1]); |
6789 | } | 6789 | } |
6790 | 6790 | ||
6791 | return MBX_BUSY; | 6791 | return MBX_BUSY; |
@@ -6796,7 +6796,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6796 | /* If we are not polling, we MUST be in SLI2 mode */ | 6796 | /* If we are not polling, we MUST be in SLI2 mode */ |
6797 | if (flag != MBX_POLL) { | 6797 | if (flag != MBX_POLL) { |
6798 | if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && | 6798 | if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && |
6799 | (mb->mbxCommand != MBX_KILL_BOARD)) { | 6799 | (mbx->mbxCommand != MBX_KILL_BOARD)) { |
6800 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 6800 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
6801 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 6801 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
6802 | /* Mbox command <mbxCommand> cannot issue */ | 6802 | /* Mbox command <mbxCommand> cannot issue */ |
@@ -6818,23 +6818,23 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6818 | "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " | 6818 | "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " |
6819 | "x%x\n", | 6819 | "x%x\n", |
6820 | pmbox->vport ? pmbox->vport->vpi : 0, | 6820 | pmbox->vport ? pmbox->vport->vpi : 0, |
6821 | mb->mbxCommand, phba->pport->port_state, | 6821 | mbx->mbxCommand, phba->pport->port_state, |
6822 | psli->sli_flag, flag); | 6822 | psli->sli_flag, flag); |
6823 | 6823 | ||
6824 | if (mb->mbxCommand != MBX_HEARTBEAT) { | 6824 | if (mbx->mbxCommand != MBX_HEARTBEAT) { |
6825 | if (pmbox->vport) { | 6825 | if (pmbox->vport) { |
6826 | lpfc_debugfs_disc_trc(pmbox->vport, | 6826 | lpfc_debugfs_disc_trc(pmbox->vport, |
6827 | LPFC_DISC_TRC_MBOX_VPORT, | 6827 | LPFC_DISC_TRC_MBOX_VPORT, |
6828 | "MBOX Send vport: cmd:x%x mb:x%x x%x", | 6828 | "MBOX Send vport: cmd:x%x mb:x%x x%x", |
6829 | (uint32_t)mb->mbxCommand, | 6829 | (uint32_t)mbx->mbxCommand, |
6830 | mb->un.varWords[0], mb->un.varWords[1]); | 6830 | mbx->un.varWords[0], mbx->un.varWords[1]); |
6831 | } | 6831 | } |
6832 | else { | 6832 | else { |
6833 | lpfc_debugfs_disc_trc(phba->pport, | 6833 | lpfc_debugfs_disc_trc(phba->pport, |
6834 | LPFC_DISC_TRC_MBOX, | 6834 | LPFC_DISC_TRC_MBOX, |
6835 | "MBOX Send: cmd:x%x mb:x%x x%x", | 6835 | "MBOX Send: cmd:x%x mb:x%x x%x", |
6836 | (uint32_t)mb->mbxCommand, | 6836 | (uint32_t)mbx->mbxCommand, |
6837 | mb->un.varWords[0], mb->un.varWords[1]); | 6837 | mbx->un.varWords[0], mbx->un.varWords[1]); |
6838 | } | 6838 | } |
6839 | } | 6839 | } |
6840 | 6840 | ||
@@ -6842,12 +6842,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6842 | evtctr = psli->slistat.mbox_event; | 6842 | evtctr = psli->slistat.mbox_event; |
6843 | 6843 | ||
6844 | /* next set own bit for the adapter and copy over command word */ | 6844 | /* next set own bit for the adapter and copy over command word */ |
6845 | mb->mbxOwner = OWN_CHIP; | 6845 | mbx->mbxOwner = OWN_CHIP; |
6846 | 6846 | ||
6847 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { | 6847 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
6848 | /* Populate mbox extension offset word. */ | 6848 | /* Populate mbox extension offset word. */ |
6849 | if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { | 6849 | if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { |
6850 | *(((uint32_t *)mb) + pmbox->mbox_offset_word) | 6850 | *(((uint32_t *)mbx) + pmbox->mbox_offset_word) |
6851 | = (uint8_t *)phba->mbox_ext | 6851 | = (uint8_t *)phba->mbox_ext |
6852 | - (uint8_t *)phba->mbox; | 6852 | - (uint8_t *)phba->mbox; |
6853 | } | 6853 | } |
@@ -6859,11 +6859,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6859 | pmbox->in_ext_byte_len); | 6859 | pmbox->in_ext_byte_len); |
6860 | } | 6860 | } |
6861 | /* Copy command data to host SLIM area */ | 6861 | /* Copy command data to host SLIM area */ |
6862 | lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); | 6862 | lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); |
6863 | } else { | 6863 | } else { |
6864 | /* Populate mbox extension offset word. */ | 6864 | /* Populate mbox extension offset word. */ |
6865 | if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) | 6865 | if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) |
6866 | *(((uint32_t *)mb) + pmbox->mbox_offset_word) | 6866 | *(((uint32_t *)mbx) + pmbox->mbox_offset_word) |
6867 | = MAILBOX_HBA_EXT_OFFSET; | 6867 | = MAILBOX_HBA_EXT_OFFSET; |
6868 | 6868 | ||
6869 | /* Copy the mailbox extension data */ | 6869 | /* Copy the mailbox extension data */ |
@@ -6873,24 +6873,24 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6873 | pmbox->context2, pmbox->in_ext_byte_len); | 6873 | pmbox->context2, pmbox->in_ext_byte_len); |
6874 | 6874 | ||
6875 | } | 6875 | } |
6876 | if (mb->mbxCommand == MBX_CONFIG_PORT) { | 6876 | if (mbx->mbxCommand == MBX_CONFIG_PORT) { |
6877 | /* copy command data into host mbox for cmpl */ | 6877 | /* copy command data into host mbox for cmpl */ |
6878 | lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); | 6878 | lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); |
6879 | } | 6879 | } |
6880 | 6880 | ||
6881 | /* First copy mbox command data to HBA SLIM, skip past first | 6881 | /* First copy mbox command data to HBA SLIM, skip past first |
6882 | word */ | 6882 | word */ |
6883 | to_slim = phba->MBslimaddr + sizeof (uint32_t); | 6883 | to_slim = phba->MBslimaddr + sizeof (uint32_t); |
6884 | lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], | 6884 | lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], |
6885 | MAILBOX_CMD_SIZE - sizeof (uint32_t)); | 6885 | MAILBOX_CMD_SIZE - sizeof (uint32_t)); |
6886 | 6886 | ||
6887 | /* Next copy over first word, with mbxOwner set */ | 6887 | /* Next copy over first word, with mbxOwner set */ |
6888 | ldata = *((uint32_t *)mb); | 6888 | ldata = *((uint32_t *)mbx); |
6889 | to_slim = phba->MBslimaddr; | 6889 | to_slim = phba->MBslimaddr; |
6890 | writel(ldata, to_slim); | 6890 | writel(ldata, to_slim); |
6891 | readl(to_slim); /* flush */ | 6891 | readl(to_slim); /* flush */ |
6892 | 6892 | ||
6893 | if (mb->mbxCommand == MBX_CONFIG_PORT) { | 6893 | if (mbx->mbxCommand == MBX_CONFIG_PORT) { |
6894 | /* switch over to host mailbox */ | 6894 | /* switch over to host mailbox */ |
6895 | psli->sli_flag |= LPFC_SLI_ACTIVE; | 6895 | psli->sli_flag |= LPFC_SLI_ACTIVE; |
6896 | } | 6896 | } |
@@ -6965,7 +6965,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6965 | /* First copy command data */ | 6965 | /* First copy command data */ |
6966 | word0 = *((uint32_t *)phba->mbox); | 6966 | word0 = *((uint32_t *)phba->mbox); |
6967 | word0 = le32_to_cpu(word0); | 6967 | word0 = le32_to_cpu(word0); |
6968 | if (mb->mbxCommand == MBX_CONFIG_PORT) { | 6968 | if (mbx->mbxCommand == MBX_CONFIG_PORT) { |
6969 | MAILBOX_t *slimmb; | 6969 | MAILBOX_t *slimmb; |
6970 | uint32_t slimword0; | 6970 | uint32_t slimword0; |
6971 | /* Check real SLIM for any errors */ | 6971 | /* Check real SLIM for any errors */ |
@@ -6992,7 +6992,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
6992 | 6992 | ||
6993 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { | 6993 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
6994 | /* copy results back to user */ | 6994 | /* copy results back to user */ |
6995 | lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); | 6995 | lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE); |
6996 | /* Copy the mailbox extension data */ | 6996 | /* Copy the mailbox extension data */ |
6997 | if (pmbox->out_ext_byte_len && pmbox->context2) { | 6997 | if (pmbox->out_ext_byte_len && pmbox->context2) { |
6998 | lpfc_sli_pcimem_bcopy(phba->mbox_ext, | 6998 | lpfc_sli_pcimem_bcopy(phba->mbox_ext, |
@@ -7001,7 +7001,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
7001 | } | 7001 | } |
7002 | } else { | 7002 | } else { |
7003 | /* First copy command data */ | 7003 | /* First copy command data */ |
7004 | lpfc_memcpy_from_slim(mb, phba->MBslimaddr, | 7004 | lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, |
7005 | MAILBOX_CMD_SIZE); | 7005 | MAILBOX_CMD_SIZE); |
7006 | /* Copy the mailbox extension data */ | 7006 | /* Copy the mailbox extension data */ |
7007 | if (pmbox->out_ext_byte_len && pmbox->context2) { | 7007 | if (pmbox->out_ext_byte_len && pmbox->context2) { |
@@ -7016,7 +7016,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
7016 | readl(phba->HAregaddr); /* flush */ | 7016 | readl(phba->HAregaddr); /* flush */ |
7017 | 7017 | ||
7018 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 7018 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
7019 | status = mb->mbxStatus; | 7019 | status = mbx->mbxStatus; |
7020 | } | 7020 | } |
7021 | 7021 | ||
7022 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 7022 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
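Note: the lpfc hunks above rename the local MAILBOX_t pointer in lpfc_sli_issue_mbox_s3() from mb to mbx throughout the SLIM copy-in/copy-out path. The diff itself does not record the motivation; renames like this usually resolve a clash or shadowing with another identifier of the same name in the surrounding code. The fragment below is a hypothetical userspace sketch, not lpfc code, of how renaming one of two same-named objects keeps both reachable:

    #include <stdio.h>

    struct mailbox { unsigned int cmd; };

    /* Sketch only: "mbx" is the renamed local, so it no longer hides the
     * caller-supplied "mb" pointer; both stay addressable in one scope. */
    static void issue_mbox(struct mailbox *mb)
    {
        struct mailbox host_copy = { .cmd = 0x88 };
        struct mailbox *mbx = &host_copy;            /* was named "mb" */

        printf("host copy cmd=0x%x, caller cmd=0x%x\n", mbx->cmd, mb->cmd);
    }

    int main(void)
    {
        struct mailbox m = { .cmd = 0x25 };

        issue_mbox(&m);
        return 0;
    }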
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 3b2365c8eab2..408d2548a748 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -33,9 +33,9 @@ | |||
33 | /* | 33 | /* |
34 | * MegaRAID SAS Driver meta data | 34 | * MegaRAID SAS Driver meta data |
35 | */ | 35 | */ |
36 | #define MEGASAS_VERSION "06.504.01.00-rc1" | 36 | #define MEGASAS_VERSION "06.506.00.00-rc1" |
37 | #define MEGASAS_RELDATE "Oct. 1, 2012" | 37 | #define MEGASAS_RELDATE "Feb. 9, 2013" |
38 | #define MEGASAS_EXT_VERSION "Mon. Oct. 1 17:00:00 PDT 2012" | 38 | #define MEGASAS_EXT_VERSION "Sat. Feb. 9 17:00:00 PDT 2013" |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * Device IDs | 41 | * Device IDs |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 66a0fec0437b..9d53540207ec 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -18,7 +18,7 @@ | |||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | * | 19 | * |
20 | * FILE: megaraid_sas_base.c | 20 | * FILE: megaraid_sas_base.c |
21 | * Version : v06.504.01.00-rc1 | 21 | * Version : v06.506.00.00-rc1 |
22 | * | 22 | * |
23 | * Authors: LSI Corporation | 23 | * Authors: LSI Corporation |
24 | * Sreenivas Bagalkote | 24 | * Sreenivas Bagalkote |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 74030aff69ad..a7d56687bfca 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -1206,7 +1206,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, | |||
1206 | MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; | 1206 | MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; |
1207 | } | 1207 | } |
1208 | io_request->Control |= (0x4 << 26); | 1208 | io_request->Control |= (0x4 << 26); |
1209 | io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE; | 1209 | io_request->EEDPBlockSize = scp->device->sector_size; |
1210 | } else { | 1210 | } else { |
1211 | /* Some drives don't support 16/12 byte CDB's, convert to 10 */ | 1211 | /* Some drives don't support 16/12 byte CDB's, convert to 10 */ |
1212 | if (((cdb_len == 12) || (cdb_len == 16)) && | 1212 | if (((cdb_len == 12) || (cdb_len == 16)) && |
@@ -1511,7 +1511,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, | |||
1511 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && | 1511 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && |
1512 | instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { | 1512 | instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { |
1513 | io_request->Function = 0; | 1513 | io_request->Function = 0; |
1514 | io_request->DevHandle = | 1514 | if (fusion->fast_path_io) |
1515 | io_request->DevHandle = | ||
1515 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; | 1516 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; |
1516 | io_request->RaidContext.timeoutValue = | 1517 | io_request->RaidContext.timeoutValue = |
1517 | local_map_ptr->raidMap.fpPdIoTimeoutSec; | 1518 | local_map_ptr->raidMap.fpPdIoTimeoutSec; |
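Note: two behavioral changes stand out in megaraid_sas_fusion.c: the EEDP (protection information) block size is now taken from scp->device->sector_size instead of a hardcoded 512, presumably so the protection interval matches drives with larger logical sectors, and the fast-path device handle is only filled in when fusion->fast_path_io is set. The next hunk removes the now-unused MEGASAS_EEDPBLOCKSIZE define. A hypothetical userspace sketch of the first idea, deriving the protection interval from the device's reported block size rather than a constant:

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch only: program the DIF/EEDP interval from the device's logical
     * sector size (512, 4096, ...) instead of assuming 512 everywhere. */
    struct fake_device { uint32_t sector_size; };
    struct fake_io     { uint32_t eedp_block_size; };

    static void build_io(struct fake_io *io, const struct fake_device *dev)
    {
        io->eedp_block_size = dev->sector_size;      /* was a fixed 512 */
    }

    int main(void)
    {
        struct fake_device d4k = { .sector_size = 4096 };
        struct fake_io io;

        build_io(&io, &d4k);
        printf("EEDP interval = %u bytes\n", io.eedp_block_size);
        return 0;
    }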
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index a7c64f051996..f68a3cd11d5d 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h | |||
@@ -61,7 +61,6 @@ | |||
61 | #define MEGASAS_SCSI_ADDL_CDB_LEN 0x18 | 61 | #define MEGASAS_SCSI_ADDL_CDB_LEN 0x18 |
62 | #define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20 | 62 | #define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20 |
63 | #define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60 | 63 | #define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60 |
64 | #define MEGASAS_EEDPBLOCKSIZE 512 | ||
65 | 64 | ||
66 | /* | 65 | /* |
67 | * Raid context flags | 66 | * Raid context flags |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 5e24e7e73714..bcb23d28b3e8 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -2023,6 +2023,14 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc) | |||
2023 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | 2023 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, |
2024 | MPT2SAS_INTEL_RMS25KB040_BRANDING); | 2024 | MPT2SAS_INTEL_RMS25KB040_BRANDING); |
2025 | break; | 2025 | break; |
2026 | case MPT2SAS_INTEL_RMS25LB040_SSDID: | ||
2027 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
2028 | MPT2SAS_INTEL_RMS25LB040_BRANDING); | ||
2029 | break; | ||
2030 | case MPT2SAS_INTEL_RMS25LB080_SSDID: | ||
2031 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
2032 | MPT2SAS_INTEL_RMS25LB080_BRANDING); | ||
2033 | break; | ||
2026 | default: | 2034 | default: |
2027 | break; | 2035 | break; |
2028 | } | 2036 | } |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index c6ee7aad7501..4caaac13682f 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
@@ -165,6 +165,10 @@ | |||
165 | "Intel(R) Integrated RAID Module RMS25KB080" | 165 | "Intel(R) Integrated RAID Module RMS25KB080" |
166 | #define MPT2SAS_INTEL_RMS25KB040_BRANDING \ | 166 | #define MPT2SAS_INTEL_RMS25KB040_BRANDING \ |
167 | "Intel(R) Integrated RAID Module RMS25KB040" | 167 | "Intel(R) Integrated RAID Module RMS25KB040" |
168 | #define MPT2SAS_INTEL_RMS25LB040_BRANDING \ | ||
169 | "Intel(R) Integrated RAID Module RMS25LB040" | ||
170 | #define MPT2SAS_INTEL_RMS25LB080_BRANDING \ | ||
171 | "Intel(R) Integrated RAID Module RMS25LB080" | ||
168 | #define MPT2SAS_INTEL_RMS2LL080_BRANDING \ | 172 | #define MPT2SAS_INTEL_RMS2LL080_BRANDING \ |
169 | "Intel Integrated RAID Module RMS2LL080" | 173 | "Intel Integrated RAID Module RMS2LL080" |
170 | #define MPT2SAS_INTEL_RMS2LL040_BRANDING \ | 174 | #define MPT2SAS_INTEL_RMS2LL040_BRANDING \ |
@@ -180,6 +184,8 @@ | |||
180 | #define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517 | 184 | #define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517 |
181 | #define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518 | 185 | #define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518 |
182 | #define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519 | 186 | #define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519 |
187 | #define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A | ||
188 | #define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B | ||
183 | #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E | 189 | #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E |
184 | #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F | 190 | #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F |
185 | #define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000 | 191 | #define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000 |
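Note: the mpt2sas change is purely additive: two new Intel subsystem device IDs (RMS25LB040/RMS25LB080, SSDIDs 0x351A/0x351B) are added to the branding switch so the matching branding string is printed at load time. A hypothetical userspace sketch of the same SSDID-to-branding lookup, written as a table rather than a switch; only the two IDs and strings come from the hunks above, the table form is an illustration, not the driver:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct branding { uint16_t ssdid; const char *name; };

    /* Sketch only: IDs and strings copied from the hunks above. */
    static const struct branding intel_branding[] = {
        { 0x351A, "Intel(R) Integrated RAID Module RMS25LB040" },
        { 0x351B, "Intel(R) Integrated RAID Module RMS25LB080" },
    };

    static const char *lookup_branding(uint16_t ssdid)
    {
        for (size_t i = 0; i < sizeof(intel_branding) / sizeof(intel_branding[0]); i++)
            if (intel_branding[i].ssdid == ssdid)
                return intel_branding[i].name;
        return NULL;
    }

    int main(void)
    {
        const char *name = lookup_branding(0x351A);

        printf("%s\n", name ? name : "unrecognized subsystem ID");
        return 0;
    }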
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 078c63913b55..532110f4562a 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c | |||
@@ -316,10 +316,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, | |||
316 | struct mvs_task_exec_info *tei) | 316 | struct mvs_task_exec_info *tei) |
317 | { | 317 | { |
318 | int elem, rc, i; | 318 | int elem, rc, i; |
319 | struct sas_ha_struct *sha = mvi->sas; | ||
319 | struct sas_task *task = tei->task; | 320 | struct sas_task *task = tei->task; |
320 | struct mvs_cmd_hdr *hdr = tei->hdr; | 321 | struct mvs_cmd_hdr *hdr = tei->hdr; |
321 | struct domain_device *dev = task->dev; | 322 | struct domain_device *dev = task->dev; |
322 | struct asd_sas_port *sas_port = dev->port; | 323 | struct asd_sas_port *sas_port = dev->port; |
324 | struct sas_phy *sphy = dev->phy; | ||
325 | struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number]; | ||
323 | struct scatterlist *sg_req, *sg_resp; | 326 | struct scatterlist *sg_req, *sg_resp; |
324 | u32 req_len, resp_len, tag = tei->tag; | 327 | u32 req_len, resp_len, tag = tei->tag; |
325 | void *buf_tmp; | 328 | void *buf_tmp; |
@@ -392,7 +395,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, | |||
392 | slot->tx = mvi->tx_prod; | 395 | slot->tx = mvi->tx_prod; |
393 | mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | | 396 | mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | |
394 | TXQ_MODE_I | tag | | 397 | TXQ_MODE_I | tag | |
395 | (sas_port->phy_mask << TXQ_PHY_SHIFT)); | 398 | (MVS_PHY_ID << TXQ_PHY_SHIFT)); |
396 | 399 | ||
397 | hdr->flags |= flags; | 400 | hdr->flags |= flags; |
398 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); | 401 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); |
@@ -438,11 +441,14 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) | |||
438 | static int mvs_task_prep_ata(struct mvs_info *mvi, | 441 | static int mvs_task_prep_ata(struct mvs_info *mvi, |
439 | struct mvs_task_exec_info *tei) | 442 | struct mvs_task_exec_info *tei) |
440 | { | 443 | { |
444 | struct sas_ha_struct *sha = mvi->sas; | ||
441 | struct sas_task *task = tei->task; | 445 | struct sas_task *task = tei->task; |
442 | struct domain_device *dev = task->dev; | 446 | struct domain_device *dev = task->dev; |
443 | struct mvs_device *mvi_dev = dev->lldd_dev; | 447 | struct mvs_device *mvi_dev = dev->lldd_dev; |
444 | struct mvs_cmd_hdr *hdr = tei->hdr; | 448 | struct mvs_cmd_hdr *hdr = tei->hdr; |
445 | struct asd_sas_port *sas_port = dev->port; | 449 | struct asd_sas_port *sas_port = dev->port; |
450 | struct sas_phy *sphy = dev->phy; | ||
451 | struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number]; | ||
446 | struct mvs_slot_info *slot; | 452 | struct mvs_slot_info *slot; |
447 | void *buf_prd; | 453 | void *buf_prd; |
448 | u32 tag = tei->tag, hdr_tag; | 454 | u32 tag = tei->tag, hdr_tag; |
@@ -462,7 +468,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, | |||
462 | slot->tx = mvi->tx_prod; | 468 | slot->tx = mvi->tx_prod; |
463 | del_q = TXQ_MODE_I | tag | | 469 | del_q = TXQ_MODE_I | tag | |
464 | (TXQ_CMD_STP << TXQ_CMD_SHIFT) | | 470 | (TXQ_CMD_STP << TXQ_CMD_SHIFT) | |
465 | (sas_port->phy_mask << TXQ_PHY_SHIFT) | | 471 | (MVS_PHY_ID << TXQ_PHY_SHIFT) | |
466 | (mvi_dev->taskfileset << TXQ_SRS_SHIFT); | 472 | (mvi_dev->taskfileset << TXQ_SRS_SHIFT); |
467 | mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); | 473 | mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); |
468 | 474 | ||
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 2ae77a0394b2..9f3cc13a5ce7 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h | |||
@@ -76,6 +76,7 @@ extern struct kmem_cache *mvs_task_list_cache; | |||
76 | (__mc) != 0 ; \ | 76 | (__mc) != 0 ; \ |
77 | (++__lseq), (__mc) >>= 1) | 77 | (++__lseq), (__mc) >>= 1) |
78 | 78 | ||
79 | #define MVS_PHY_ID (1U << sas_phy->id) | ||
79 | #define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f) | 80 | #define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f) |
80 | #define UNASSOC_D2H_FIS(id) \ | 81 | #define UNASSOC_D2H_FIS(id) \ |
81 | ((void *) mvi->rx_fis + 0x100 * id) | 82 | ((void *) mvi->rx_fis + 0x100 * id) |
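Note: in mvsas, the SMP and STP/ATA delivery-queue entries are now built with MVS_PHY_ID, the single bit for the phy the device is attached to (1U << sas_phy->id), instead of sas_port->phy_mask, and both prep routines gain the sha/sphy/sas_phy locals that the new macro quietly depends on. The distinction matters on wide ports, where phy_mask has several bits set. A hypothetical userspace sketch of the two encodings; the field position is made up, only the bit arithmetic mirrors the change:

    #include <stdio.h>
    #include <stdint.h>

    #define TXQ_PHY_SHIFT 12   /* made-up position, for illustration only */

    int main(void)
    {
        uint32_t port_phy_mask = 0x0F;   /* wide port built from phys 0-3   */
        unsigned attached_phy  = 2;      /* device actually hangs off phy 2 */

        uint32_t by_mask = port_phy_mask << TXQ_PHY_SHIFT;
        uint32_t by_id   = (1U << attached_phy) << TXQ_PHY_SHIFT;

        printf("phy_mask encoding:   0x%08x\n", by_mask);
        printf("single-phy encoding: 0x%08x\n", by_id);
        return 0;
    }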
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index c06b8e5aa2cf..d8293f25ca33 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c | |||
@@ -144,6 +144,10 @@ static int _osd_get_print_system_info(struct osd_dev *od, | |||
144 | odi->osdname_len = get_attrs[a].len; | 144 | odi->osdname_len = get_attrs[a].len; |
145 | /* Avoid NULL for memcmp optimization 0-length is good enough */ | 145 | /* Avoid NULL for memcmp optimization 0-length is good enough */ |
146 | odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL); | 146 | odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL); |
147 | if (!odi->osdname) { | ||
148 | ret = -ENOMEM; | ||
149 | goto out; | ||
150 | } | ||
147 | if (odi->osdname_len) | 151 | if (odi->osdname_len) |
148 | memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len); | 152 | memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len); |
149 | OSD_INFO("OSD_NAME [%s]\n", odi->osdname); | 153 | OSD_INFO("OSD_NAME [%s]\n", odi->osdname); |
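Note: the osd_initiator change is a robustness fix: kzalloc() of odi->osdname can fail, and the result is now checked before the memcpy() and OSD_INFO() that would otherwise run against a NULL pointer (the zero-length case still gets one zeroed byte, per the comment above it). A hypothetical userspace sketch of the same allocate-check-bail pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Sketch only: allocate a NUL-terminated copy and fail cleanly,
     * -ENOMEM style, when the allocation itself fails. */
    static int copy_name(const char *src, size_t len, char **out)
    {
        char *name = calloc(1, len + 1);     /* +1 keeps a terminator */

        if (!name)
            return -1;                       /* stands in for -ENOMEM */
        if (len)
            memcpy(name, src, len);
        *out = name;
        return 0;
    }

    int main(void)
    {
        char *osdname;

        if (copy_name("osd-target-0", 12, &osdname) == 0) {
            printf("OSD_NAME [%s]\n", osdname);
            free(osdname);
        }
        return 0;
    }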
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 4c9fe733fe88..3d5e522e00fc 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c | |||
@@ -140,7 +140,8 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) | |||
140 | for (i = 0; i < USI_MAX_MEMCNT; i++) { | 140 | for (i = 0; i < USI_MAX_MEMCNT; i++) { |
141 | if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) { | 141 | if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) { |
142 | pci_free_consistent(pm8001_ha->pdev, | 142 | pci_free_consistent(pm8001_ha->pdev, |
143 | pm8001_ha->memoryMap.region[i].element_size, | 143 | (pm8001_ha->memoryMap.region[i].total_len + |
144 | pm8001_ha->memoryMap.region[i].alignment), | ||
144 | pm8001_ha->memoryMap.region[i].virt_ptr, | 145 | pm8001_ha->memoryMap.region[i].virt_ptr, |
145 | pm8001_ha->memoryMap.region[i].phys_addr); | 146 | pm8001_ha->memoryMap.region[i].phys_addr); |
146 | } | 147 | } |
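Note: the pm8001 hunk only changes the size argument of pci_free_consistent(): the region is now released as total_len + alignment, presumably the same expression used when the DMA region was allocated, rather than the per-element size. The consistent-DMA API, unlike plain free(), takes a size and expects it to match the allocation. A hypothetical userspace sketch of keeping that expression in one place:

    #include <stdio.h>
    #include <stdlib.h>

    struct region {
        void   *virt_ptr;
        size_t  total_len;
        size_t  alignment;
    };

    /* Sketch only: one helper supplies the size for both sides, so the
     * alloc and free arguments cannot drift apart. */
    static size_t region_bytes(const struct region *r)
    {
        return r->total_len + r->alignment;
    }

    int main(void)
    {
        struct region r = { .total_len = 4096, .alignment = 64 };

        r.virt_ptr = malloc(region_bytes(&r));
        if (!r.virt_ptr)
            return 1;
        printf("alloc %zu bytes, free %zu bytes\n",
               region_bytes(&r), region_bytes(&r));
        free(r.virt_ptr);
        return 0;
    }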
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 83d798428c10..1d82eef4e1eb 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -1272,22 +1272,29 @@ qla2x00_thermal_temp_show(struct device *dev, | |||
1272 | struct device_attribute *attr, char *buf) | 1272 | struct device_attribute *attr, char *buf) |
1273 | { | 1273 | { |
1274 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | 1274 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); |
1275 | int rval = QLA_FUNCTION_FAILED; | 1275 | uint16_t temp = 0; |
1276 | uint16_t temp, frac; | ||
1277 | 1276 | ||
1278 | if (!vha->hw->flags.thermal_supported) | 1277 | if (!vha->hw->thermal_support) { |
1279 | return snprintf(buf, PAGE_SIZE, "\n"); | 1278 | ql_log(ql_log_warn, vha, 0x70db, |
1279 | "Thermal not supported by this card.\n"); | ||
1280 | goto done; | ||
1281 | } | ||
1280 | 1282 | ||
1281 | temp = frac = 0; | 1283 | if (qla2x00_reset_active(vha)) { |
1282 | if (qla2x00_reset_active(vha)) | 1284 | ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); |
1283 | ql_log(ql_log_warn, vha, 0x707b, | 1285 | goto done; |
1284 | "ISP reset active.\n"); | 1286 | } |
1285 | else if (!vha->hw->flags.eeh_busy) | 1287 | |
1286 | rval = qla2x00_get_thermal_temp(vha, &temp, &frac); | 1288 | if (vha->hw->flags.eeh_busy) { |
1287 | if (rval != QLA_SUCCESS) | 1289 | ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n"); |
1288 | return snprintf(buf, PAGE_SIZE, "\n"); | 1290 | goto done; |
1291 | } | ||
1292 | |||
1293 | if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS) | ||
1294 | return snprintf(buf, PAGE_SIZE, "%d\n", temp); | ||
1289 | 1295 | ||
1290 | return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac); | 1296 | done: |
1297 | return snprintf(buf, PAGE_SIZE, "\n"); | ||
1291 | } | 1298 | } |
1292 | 1299 | ||
1293 | static ssize_t | 1300 | static ssize_t |
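Note: qla2x00_thermal_temp_show() is rebuilt around the new ha->thermal_support bitmask (the old thermal_supported flag bit is deleted from qla_def.h later in this patch): each early-exit condition now logs its own warning ID (0x70db-0x70dd), the fractional part of the reading is dropped, and qla2x00_get_thermal_temp() loses its second output parameter to match. A hypothetical userspace sketch of the resulting show-function shape, which prints either a whole-degree value or a bare newline:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Sketch only: stand-ins for the adapter state and the mailbox call. */
    struct fake_hw { int thermal_support; int reset_active; int eeh_busy; };

    static int get_thermal_temp(unsigned short *temp)
    {
        *temp = 42;          /* pretend the firmware reported 42 C */
        return 0;            /* 0 plays the role of QLA_SUCCESS */
    }

    static int temp_show(struct fake_hw *hw, char *buf)
    {
        unsigned short temp = 0;

        if (!hw->thermal_support || hw->reset_active || hw->eeh_busy)
            goto done;       /* each branch would log its own message */

        if (get_thermal_temp(&temp) == 0)
            return snprintf(buf, PAGE_SIZE, "%d\n", temp);

    done:
        return snprintf(buf, PAGE_SIZE, "\n");    /* empty reading */
    }

    int main(void)
    {
        struct fake_hw hw = { .thermal_support = 1 };
        char buf[PAGE_SIZE];

        temp_show(&hw, buf);
        fputs(buf, stdout);
        return 0;
    }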
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 9f34dedcdad7..ad54099cb805 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c | |||
@@ -27,7 +27,7 @@ void | |||
27 | qla2x00_bsg_sp_free(void *data, void *ptr) | 27 | qla2x00_bsg_sp_free(void *data, void *ptr) |
28 | { | 28 | { |
29 | srb_t *sp = (srb_t *)ptr; | 29 | srb_t *sp = (srb_t *)ptr; |
30 | struct scsi_qla_host *vha = (scsi_qla_host_t *)data; | 30 | struct scsi_qla_host *vha = sp->fcport->vha; |
31 | struct fc_bsg_job *bsg_job = sp->u.bsg_job; | 31 | struct fc_bsg_job *bsg_job = sp->u.bsg_job; |
32 | struct qla_hw_data *ha = vha->hw; | 32 | struct qla_hw_data *ha = vha->hw; |
33 | 33 | ||
@@ -40,7 +40,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr) | |||
40 | if (sp->type == SRB_CT_CMD || | 40 | if (sp->type == SRB_CT_CMD || |
41 | sp->type == SRB_ELS_CMD_HST) | 41 | sp->type == SRB_ELS_CMD_HST) |
42 | kfree(sp->fcport); | 42 | kfree(sp->fcport); |
43 | mempool_free(sp, vha->hw->srb_mempool); | 43 | qla2x00_rel_sp(vha, sp); |
44 | } | 44 | } |
45 | 45 | ||
46 | int | 46 | int |
@@ -368,7 +368,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) | |||
368 | if (rval != QLA_SUCCESS) { | 368 | if (rval != QLA_SUCCESS) { |
369 | ql_log(ql_log_warn, vha, 0x700e, | 369 | ql_log(ql_log_warn, vha, 0x700e, |
370 | "qla2x00_start_sp failed = %d\n", rval); | 370 | "qla2x00_start_sp failed = %d\n", rval); |
371 | mempool_free(sp, ha->srb_mempool); | 371 | qla2x00_rel_sp(vha, sp); |
372 | rval = -EIO; | 372 | rval = -EIO; |
373 | goto done_unmap_sg; | 373 | goto done_unmap_sg; |
374 | } | 374 | } |
@@ -515,7 +515,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) | |||
515 | if (rval != QLA_SUCCESS) { | 515 | if (rval != QLA_SUCCESS) { |
516 | ql_log(ql_log_warn, vha, 0x7017, | 516 | ql_log(ql_log_warn, vha, 0x7017, |
517 | "qla2x00_start_sp failed=%d.\n", rval); | 517 | "qla2x00_start_sp failed=%d.\n", rval); |
518 | mempool_free(sp, ha->srb_mempool); | 518 | qla2x00_rel_sp(vha, sp); |
519 | rval = -EIO; | 519 | rval = -EIO; |
520 | goto done_free_fcport; | 520 | goto done_free_fcport; |
521 | } | 521 | } |
@@ -531,6 +531,75 @@ done_unmap_sg: | |||
531 | done: | 531 | done: |
532 | return rval; | 532 | return rval; |
533 | } | 533 | } |
534 | |||
535 | /* Disable loopback mode */ | ||
536 | static inline int | ||
537 | qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, | ||
538 | int wait, int wait2) | ||
539 | { | ||
540 | int ret = 0; | ||
541 | int rval = 0; | ||
542 | uint16_t new_config[4]; | ||
543 | struct qla_hw_data *ha = vha->hw; | ||
544 | |||
545 | if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) | ||
546 | goto done_reset_internal; | ||
547 | |||
548 | memset(new_config, 0 , sizeof(new_config)); | ||
549 | if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == | ||
550 | ENABLE_INTERNAL_LOOPBACK || | ||
551 | (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == | ||
552 | ENABLE_EXTERNAL_LOOPBACK) { | ||
553 | new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK; | ||
554 | ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n", | ||
555 | (new_config[0] & INTERNAL_LOOPBACK_MASK)); | ||
556 | memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; | ||
557 | |||
558 | ha->notify_dcbx_comp = wait; | ||
559 | ha->notify_lb_portup_comp = wait2; | ||
560 | |||
561 | ret = qla81xx_set_port_config(vha, new_config); | ||
562 | if (ret != QLA_SUCCESS) { | ||
563 | ql_log(ql_log_warn, vha, 0x7025, | ||
564 | "Set port config failed.\n"); | ||
565 | ha->notify_dcbx_comp = 0; | ||
566 | ha->notify_lb_portup_comp = 0; | ||
567 | rval = -EINVAL; | ||
568 | goto done_reset_internal; | ||
569 | } | ||
570 | |||
571 | /* Wait for DCBX complete event */ | ||
572 | if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, | ||
573 | (DCBX_COMP_TIMEOUT * HZ))) { | ||
574 | ql_dbg(ql_dbg_user, vha, 0x7026, | ||
575 | "DCBX completion not received.\n"); | ||
576 | ha->notify_dcbx_comp = 0; | ||
577 | ha->notify_lb_portup_comp = 0; | ||
578 | rval = -EINVAL; | ||
579 | goto done_reset_internal; | ||
580 | } else | ||
581 | ql_dbg(ql_dbg_user, vha, 0x7027, | ||
582 | "DCBX completion received.\n"); | ||
583 | |||
584 | if (wait2 && | ||
585 | !wait_for_completion_timeout(&ha->lb_portup_comp, | ||
586 | (LB_PORTUP_COMP_TIMEOUT * HZ))) { | ||
587 | ql_dbg(ql_dbg_user, vha, 0x70c5, | ||
588 | "Port up completion not received.\n"); | ||
589 | ha->notify_lb_portup_comp = 0; | ||
590 | rval = -EINVAL; | ||
591 | goto done_reset_internal; | ||
592 | } else | ||
593 | ql_dbg(ql_dbg_user, vha, 0x70c6, | ||
594 | "Port up completion received.\n"); | ||
595 | |||
596 | ha->notify_dcbx_comp = 0; | ||
597 | ha->notify_lb_portup_comp = 0; | ||
598 | } | ||
599 | done_reset_internal: | ||
600 | return rval; | ||
601 | } | ||
602 | |||
534 | /* | 603 | /* |
535 | * Set the port configuration to enable the internal or external loopback | 604 | * Set the port configuration to enable the internal or external loopback |
536 | * depending on the loopback mode. | 605 | * depending on the loopback mode. |
@@ -566,9 +635,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, | |||
566 | } | 635 | } |
567 | 636 | ||
568 | /* Wait for DCBX complete event */ | 637 | /* Wait for DCBX complete event */ |
569 | if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { | 638 | if (!wait_for_completion_timeout(&ha->dcbx_comp, |
639 | (DCBX_COMP_TIMEOUT * HZ))) { | ||
570 | ql_dbg(ql_dbg_user, vha, 0x7022, | 640 | ql_dbg(ql_dbg_user, vha, 0x7022, |
571 | "State change notification not received.\n"); | 641 | "DCBX completion not received.\n"); |
642 | ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0); | ||
643 | /* | ||
644 | * If the reset of the loopback mode doesn't work take a FCoE | ||
645 | * dump and reset the chip. | ||
646 | */ | ||
647 | if (ret) { | ||
648 | ha->isp_ops->fw_dump(vha, 0); | ||
649 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | ||
650 | } | ||
572 | rval = -EINVAL; | 651 | rval = -EINVAL; |
573 | } else { | 652 | } else { |
574 | if (ha->flags.idc_compl_status) { | 653 | if (ha->flags.idc_compl_status) { |
@@ -578,7 +657,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, | |||
578 | ha->flags.idc_compl_status = 0; | 657 | ha->flags.idc_compl_status = 0; |
579 | } else | 658 | } else |
580 | ql_dbg(ql_dbg_user, vha, 0x7023, | 659 | ql_dbg(ql_dbg_user, vha, 0x7023, |
581 | "State change received.\n"); | 660 | "DCBX completion received.\n"); |
582 | } | 661 | } |
583 | 662 | ||
584 | ha->notify_dcbx_comp = 0; | 663 | ha->notify_dcbx_comp = 0; |
@@ -587,57 +666,6 @@ done_set_internal: | |||
587 | return rval; | 666 | return rval; |
588 | } | 667 | } |
589 | 668 | ||
590 | /* Disable loopback mode */ | ||
591 | static inline int | ||
592 | qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, | ||
593 | int wait) | ||
594 | { | ||
595 | int ret = 0; | ||
596 | int rval = 0; | ||
597 | uint16_t new_config[4]; | ||
598 | struct qla_hw_data *ha = vha->hw; | ||
599 | |||
600 | if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) | ||
601 | goto done_reset_internal; | ||
602 | |||
603 | memset(new_config, 0 , sizeof(new_config)); | ||
604 | if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == | ||
605 | ENABLE_INTERNAL_LOOPBACK || | ||
606 | (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == | ||
607 | ENABLE_EXTERNAL_LOOPBACK) { | ||
608 | new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK; | ||
609 | ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n", | ||
610 | (new_config[0] & INTERNAL_LOOPBACK_MASK)); | ||
611 | memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; | ||
612 | |||
613 | ha->notify_dcbx_comp = wait; | ||
614 | ret = qla81xx_set_port_config(vha, new_config); | ||
615 | if (ret != QLA_SUCCESS) { | ||
616 | ql_log(ql_log_warn, vha, 0x7025, | ||
617 | "Set port config failed.\n"); | ||
618 | ha->notify_dcbx_comp = 0; | ||
619 | rval = -EINVAL; | ||
620 | goto done_reset_internal; | ||
621 | } | ||
622 | |||
623 | /* Wait for DCBX complete event */ | ||
624 | if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, | ||
625 | (20 * HZ))) { | ||
626 | ql_dbg(ql_dbg_user, vha, 0x7026, | ||
627 | "State change notification not received.\n"); | ||
628 | ha->notify_dcbx_comp = 0; | ||
629 | rval = -EINVAL; | ||
630 | goto done_reset_internal; | ||
631 | } else | ||
632 | ql_dbg(ql_dbg_user, vha, 0x7027, | ||
633 | "State change received.\n"); | ||
634 | |||
635 | ha->notify_dcbx_comp = 0; | ||
636 | } | ||
637 | done_reset_internal: | ||
638 | return rval; | ||
639 | } | ||
640 | |||
641 | static int | 669 | static int |
642 | qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | 670 | qla2x00_process_loopback(struct fc_bsg_job *bsg_job) |
643 | { | 671 | { |
@@ -739,6 +767,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
739 | if (IS_QLA81XX(ha) || IS_QLA8031(ha)) { | 767 | if (IS_QLA81XX(ha) || IS_QLA8031(ha)) { |
740 | memset(config, 0, sizeof(config)); | 768 | memset(config, 0, sizeof(config)); |
741 | memset(new_config, 0, sizeof(new_config)); | 769 | memset(new_config, 0, sizeof(new_config)); |
770 | |||
742 | if (qla81xx_get_port_config(vha, config)) { | 771 | if (qla81xx_get_port_config(vha, config)) { |
743 | ql_log(ql_log_warn, vha, 0x701f, | 772 | ql_log(ql_log_warn, vha, 0x701f, |
744 | "Get port config failed.\n"); | 773 | "Get port config failed.\n"); |
@@ -746,6 +775,14 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
746 | goto done_free_dma_rsp; | 775 | goto done_free_dma_rsp; |
747 | } | 776 | } |
748 | 777 | ||
778 | if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) { | ||
779 | ql_dbg(ql_dbg_user, vha, 0x70c4, | ||
780 | "Loopback operation already in " | ||
781 | "progress.\n"); | ||
782 | rval = -EAGAIN; | ||
783 | goto done_free_dma_rsp; | ||
784 | } | ||
785 | |||
749 | ql_dbg(ql_dbg_user, vha, 0x70c0, | 786 | ql_dbg(ql_dbg_user, vha, 0x70c0, |
750 | "elreq.options=%04x\n", elreq.options); | 787 | "elreq.options=%04x\n", elreq.options); |
751 | 788 | ||
@@ -755,7 +792,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
755 | config, new_config, elreq.options); | 792 | config, new_config, elreq.options); |
756 | else | 793 | else |
757 | rval = qla81xx_reset_loopback_mode(vha, | 794 | rval = qla81xx_reset_loopback_mode(vha, |
758 | config, 1); | 795 | config, 1, 0); |
759 | else | 796 | else |
760 | rval = qla81xx_set_loopback_mode(vha, config, | 797 | rval = qla81xx_set_loopback_mode(vha, config, |
761 | new_config, elreq.options); | 798 | new_config, elreq.options); |
@@ -772,14 +809,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
772 | command_sent = INT_DEF_LB_LOOPBACK_CMD; | 809 | command_sent = INT_DEF_LB_LOOPBACK_CMD; |
773 | rval = qla2x00_loopback_test(vha, &elreq, response); | 810 | rval = qla2x00_loopback_test(vha, &elreq, response); |
774 | 811 | ||
775 | if (new_config[0]) { | ||
776 | /* Revert back to original port config | ||
777 | * Also clear internal loopback | ||
778 | */ | ||
779 | qla81xx_reset_loopback_mode(vha, | ||
780 | new_config, 0); | ||
781 | } | ||
782 | |||
783 | if (response[0] == MBS_COMMAND_ERROR && | 812 | if (response[0] == MBS_COMMAND_ERROR && |
784 | response[1] == MBS_LB_RESET) { | 813 | response[1] == MBS_LB_RESET) { |
785 | ql_log(ql_log_warn, vha, 0x7029, | 814 | ql_log(ql_log_warn, vha, 0x7029, |
@@ -788,15 +817,39 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
788 | qla2xxx_wake_dpc(vha); | 817 | qla2xxx_wake_dpc(vha); |
789 | qla2x00_wait_for_chip_reset(vha); | 818 | qla2x00_wait_for_chip_reset(vha); |
790 | /* Also reset the MPI */ | 819 | /* Also reset the MPI */ |
791 | if (qla81xx_restart_mpi_firmware(vha) != | 820 | if (IS_QLA81XX(ha)) { |
792 | QLA_SUCCESS) { | 821 | if (qla81xx_restart_mpi_firmware(vha) != |
793 | ql_log(ql_log_warn, vha, 0x702a, | 822 | QLA_SUCCESS) { |
794 | "MPI reset failed.\n"); | 823 | ql_log(ql_log_warn, vha, 0x702a, |
824 | "MPI reset failed.\n"); | ||
825 | } | ||
795 | } | 826 | } |
796 | 827 | ||
797 | rval = -EIO; | 828 | rval = -EIO; |
798 | goto done_free_dma_rsp; | 829 | goto done_free_dma_rsp; |
799 | } | 830 | } |
831 | |||
832 | if (new_config[0]) { | ||
833 | int ret; | ||
834 | |||
835 | /* Revert back to original port config | ||
836 | * Also clear internal loopback | ||
837 | */ | ||
838 | ret = qla81xx_reset_loopback_mode(vha, | ||
839 | new_config, 0, 1); | ||
840 | if (ret) { | ||
841 | /* | ||
842 | * If the reset of the loopback mode | ||
843 | * doesn't work take FCoE dump and then | ||
844 | * reset the chip. | ||
845 | */ | ||
846 | ha->isp_ops->fw_dump(vha, 0); | ||
847 | set_bit(ISP_ABORT_NEEDED, | ||
848 | &vha->dpc_flags); | ||
849 | } | ||
850 | |||
851 | } | ||
852 | |||
800 | } else { | 853 | } else { |
801 | type = "FC_BSG_HST_VENDOR_LOOPBACK"; | 854 | type = "FC_BSG_HST_VENDOR_LOOPBACK"; |
802 | ql_dbg(ql_dbg_user, vha, 0x702b, | 855 | ql_dbg(ql_dbg_user, vha, 0x702b, |
@@ -1950,7 +2003,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) | |||
1950 | if (!req) | 2003 | if (!req) |
1951 | continue; | 2004 | continue; |
1952 | 2005 | ||
1953 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | 2006 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { |
1954 | sp = req->outstanding_cmds[cnt]; | 2007 | sp = req->outstanding_cmds[cnt]; |
1955 | if (sp) { | 2008 | if (sp) { |
1956 | if (((sp->type == SRB_CT_CMD) || | 2009 | if (((sp->type == SRB_CT_CMD) || |
@@ -1985,6 +2038,6 @@ done: | |||
1985 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 2038 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1986 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) | 2039 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) |
1987 | kfree(sp->fcport); | 2040 | kfree(sp->fcport); |
1988 | mempool_free(sp, ha->srb_mempool); | 2041 | qla2x00_rel_sp(vha, sp); |
1989 | return 0; | 2042 | return 0; |
1990 | } | 2043 | } |
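Note: the qla_bsg.c changes hang together: qla81xx_reset_loopback_mode() moves above its first caller and gains a wait2 flag so it can also wait on the new lb_portup_comp completion; the revert-to-original-port-config step now runs only after the loopback test (and escalates to an FCoE firmware dump plus ISP abort when the revert fails, both here and in the DCBX-timeout path of qla81xx_set_loopback_mode()); a request is rejected with -EAGAIN if the port config already shows loopback in progress; srb release goes through qla2x00_rel_sp(); and the bsg timeout scan is bounded by req->num_outstanding_cmds instead of the old fixed constant. A hypothetical userspace sketch of the two-stage wait, with a crude stub standing in for wait_for_completion_timeout():

    #include <stdio.h>
    #include <time.h>

    /* Sketch only: poll a flag for up to 'seconds'; nonzero means it fired. */
    static int wait_flag_timeout(const volatile int *flag, int seconds)
    {
        time_t deadline = time(NULL) + seconds;

        while (time(NULL) < deadline)
            if (*flag)
                return 1;
        return 0;
    }

    int main(void)
    {
        volatile int dcbx_done = 1;      /* pretend both events already fired */
        volatile int portup_done = 1;
        int wait = 1, wait2 = 1;

        if (wait && !wait_flag_timeout(&dcbx_done, 20)) {     /* DCBX_COMP_TIMEOUT */
            fprintf(stderr, "DCBX completion not received\n");
            return 1;
        }
        if (wait2 && !wait_flag_timeout(&portup_done, 10)) {  /* LB_PORTUP_COMP_TIMEOUT */
            fprintf(stderr, "Port up completion not received\n");
            return 1;
        }
        printf("loopback config reverted\n");
        return 0;
    }

The 20- and 10-second figures come from the DCBX_COMP_TIMEOUT and LB_PORTUP_COMP_TIMEOUT defines added to qla_def.h further down.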
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index 37b8b7ba7421..e9f6b9bbf29a 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 53f9e492f9dc..1626de52e32a 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -11,20 +11,21 @@ | |||
11 | * ---------------------------------------------------------------------- | 11 | * ---------------------------------------------------------------------- |
12 | * | Level | Last Value Used | Holes | | 12 | * | Level | Last Value Used | Holes | |
13 | * ---------------------------------------------------------------------- | 13 | * ---------------------------------------------------------------------- |
14 | * | Module Init and Probe | 0x0125 | 0x4b,0xba,0xfa | | 14 | * | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa | |
15 | * | Mailbox commands | 0x114f | 0x111a-0x111b | | 15 | * | Mailbox commands | 0x115b | 0x111a-0x111b | |
16 | * | | | 0x112c-0x112e | | 16 | * | | | 0x112c-0x112e | |
17 | * | | | 0x113a | | 17 | * | | | 0x113a | |
18 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | | 18 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | |
19 | * | | | 0x2016 | | 19 | * | | | 0x2016 | |
20 | * | Queue Command and IO tracing | 0x3030 | 0x3006-0x300b | | 20 | * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | |
21 | * | | | 0x3027-0x3028 | | 21 | * | | | 0x3027-0x3028 | |
22 | * | | | 0x302d-0x302e | | 22 | * | | | 0x302d-0x302e | |
23 | * | DPC Thread | 0x401d | 0x4002,0x4013 | | 23 | * | DPC Thread | 0x401d | 0x4002,0x4013 | |
24 | * | Async Events | 0x5071 | 0x502b-0x502f | | 24 | * | Async Events | 0x5071 | 0x502b-0x502f | |
25 | * | | | 0x5047,0x5052 | | 25 | * | | | 0x5047,0x5052 | |
26 | * | Timer Routines | 0x6011 | | | 26 | * | Timer Routines | 0x6011 | | |
27 | * | User Space Interactions | 0x70c3 | 0x7018,0x702e, | | 27 | * | User Space Interactions | 0x70c4 | 0x7018,0x702e, | |
28 | * | | | 0x7020,0x7024, | | ||
28 | * | | | 0x7039,0x7045, | | 29 | * | | | 0x7039,0x7045, | |
29 | * | | | 0x7073-0x7075, | | 30 | * | | | 0x7073-0x7075, | |
30 | * | | | 0x708c, | | 31 | * | | | 0x708c, | |
@@ -35,11 +36,11 @@ | |||
35 | * | | | 0x800b,0x8039 | | 36 | * | | | 0x800b,0x8039 | |
36 | * | AER/EEH | 0x9011 | | | 37 | * | AER/EEH | 0x9011 | | |
37 | * | Virtual Port | 0xa007 | | | 38 | * | Virtual Port | 0xa007 | | |
38 | * | ISP82XX Specific | 0xb084 | 0xb002,0xb024 | | 39 | * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 | |
39 | * | MultiQ | 0xc00c | | | 40 | * | MultiQ | 0xc00c | | |
40 | * | Misc | 0xd010 | | | 41 | * | Misc | 0xd010 | | |
41 | * | Target Mode | 0xe06f | | | 42 | * | Target Mode | 0xe070 | | |
42 | * | Target Mode Management | 0xf071 | | | 43 | * | Target Mode Management | 0xf072 | | |
43 | * | Target Mode Task Management | 0x1000b | | | 44 | * | Target Mode Task Management | 0x1000b | | |
44 | * ---------------------------------------------------------------------- | 45 | * ---------------------------------------------------------------------- |
45 | */ | 46 | */ |
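Note: the qla_dbg.c block is bookkeeping rather than code: it is a hand-maintained ledger of the highest ql_dbg/ql_log message ID used in each area (plus the holes), so new call sites elsewhere in this patch, such as 0x70db-0x70dd in qla_attr.c and 0x70c4-0x70c6 in qla_bsg.c, can claim IDs that are not already taken; 0x7020 and 0x7024 join the holes list, presumably because the messages that used them were reworked. A tiny, hypothetical helper for checking a candidate ID against such a list; the sample IDs below are illustrative, not the authoritative table:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool id_used(uint32_t id, const uint32_t *used, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if (used[i] == id)
                return true;
        return false;
    }

    int main(void)
    {
        /* Sketch only: a made-up sample; the real ledger is the table above. */
        const uint32_t sample[] = { 0x70c3, 0x70c4, 0x70c5, 0x70c6 };
        uint32_t candidate = 0x70c3;

        while (id_used(candidate, sample, sizeof(sample) / sizeof(sample[0])))
            candidate++;
        printf("first unreserved ID at or after 0x70c3: 0x%04x\n", candidate);
        return 0;
    }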
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 8f911c0b1e74..35e20b4f8b6c 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 6e7727f46d43..c6509911772b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -37,6 +37,7 @@ | |||
37 | #include "qla_nx.h" | 37 | #include "qla_nx.h" |
38 | #define QLA2XXX_DRIVER_NAME "qla2xxx" | 38 | #define QLA2XXX_DRIVER_NAME "qla2xxx" |
39 | #define QLA2XXX_APIDEV "ql2xapidev" | 39 | #define QLA2XXX_APIDEV "ql2xapidev" |
40 | #define QLA2XXX_MANUFACTURER "QLogic Corporation" | ||
40 | 41 | ||
41 | /* | 42 | /* |
42 | * We have MAILBOX_REGISTER_COUNT sized arrays in a few places, | 43 | * We have MAILBOX_REGISTER_COUNT sized arrays in a few places, |
@@ -253,8 +254,8 @@ | |||
253 | #define LOOP_DOWN_TIME 255 /* 240 */ | 254 | #define LOOP_DOWN_TIME 255 /* 240 */ |
254 | #define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30) | 255 | #define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30) |
255 | 256 | ||
256 | /* Maximum outstanding commands in ISP queues (1-65535) */ | 257 | #define DEFAULT_OUTSTANDING_COMMANDS 1024 |
257 | #define MAX_OUTSTANDING_COMMANDS 1024 | 258 | #define MIN_OUTSTANDING_COMMANDS 128 |
258 | 259 | ||
259 | /* ISP request and response entry counts (37-65535) */ | 260 | /* ISP request and response entry counts (37-65535) */ |
260 | #define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */ | 261 | #define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */ |
@@ -537,6 +538,8 @@ struct device_reg_25xxmq { | |||
537 | uint32_t req_q_out; | 538 | uint32_t req_q_out; |
538 | uint32_t rsp_q_in; | 539 | uint32_t rsp_q_in; |
539 | uint32_t rsp_q_out; | 540 | uint32_t rsp_q_out; |
541 | uint32_t atio_q_in; | ||
542 | uint32_t atio_q_out; | ||
540 | }; | 543 | }; |
541 | 544 | ||
542 | typedef union { | 545 | typedef union { |
@@ -563,6 +566,9 @@ typedef union { | |||
563 | &(reg)->u.isp2100.mailbox5 : \ | 566 | &(reg)->u.isp2100.mailbox5 : \ |
564 | &(reg)->u.isp2300.rsp_q_out) | 567 | &(reg)->u.isp2300.rsp_q_out) |
565 | 568 | ||
569 | #define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in) | ||
570 | #define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out) | ||
571 | |||
566 | #define MAILBOX_REG(ha, reg, num) \ | 572 | #define MAILBOX_REG(ha, reg, num) \ |
567 | (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ | 573 | (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ |
568 | (num < 8 ? \ | 574 | (num < 8 ? \ |
@@ -762,8 +768,8 @@ typedef struct { | |||
762 | #define MBC_PORT_LOGOUT 0x56 /* Port Logout request */ | 768 | #define MBC_PORT_LOGOUT 0x56 /* Port Logout request */ |
763 | #define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */ | 769 | #define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */ |
764 | #define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */ | 770 | #define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */ |
765 | #define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */ | 771 | #define MBC_GET_RNID_PARAMS 0x5a /* Get RNID parameters */ |
766 | #define MBC_DATA_RATE 0x5d /* Get RNID parameters */ | 772 | #define MBC_DATA_RATE 0x5d /* Data Rate */ |
767 | #define MBC_INITIALIZE_FIRMWARE 0x60 /* Initialize firmware */ | 773 | #define MBC_INITIALIZE_FIRMWARE 0x60 /* Initialize firmware */ |
768 | #define MBC_INITIATE_LIP 0x62 /* Initiate Loop */ | 774 | #define MBC_INITIATE_LIP 0x62 /* Initiate Loop */ |
769 | /* Initialization Procedure */ | 775 | /* Initialization Procedure */ |
@@ -809,6 +815,7 @@ typedef struct { | |||
809 | #define MBC_HOST_MEMORY_COPY 0x53 /* Host Memory Copy. */ | 815 | #define MBC_HOST_MEMORY_COPY 0x53 /* Host Memory Copy. */ |
810 | #define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */ | 816 | #define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */ |
811 | #define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */ | 817 | #define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */ |
818 | #define MBC_LINK_INITIALIZATION 0x72 /* Do link initialization. */ | ||
812 | #define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */ | 819 | #define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */ |
813 | #define MBC_PORT_RESET 0x120 /* Port Reset */ | 820 | #define MBC_PORT_RESET 0x120 /* Port Reset */ |
814 | #define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */ | 821 | #define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */ |
@@ -856,6 +863,9 @@ typedef struct { | |||
856 | #define MBX_1 BIT_1 | 863 | #define MBX_1 BIT_1 |
857 | #define MBX_0 BIT_0 | 864 | #define MBX_0 BIT_0 |
858 | 865 | ||
866 | #define RNID_TYPE_SET_VERSION 0x9 | ||
867 | #define RNID_TYPE_ASIC_TEMP 0xC | ||
868 | |||
859 | /* | 869 | /* |
860 | * Firmware state codes from get firmware state mailbox command | 870 | * Firmware state codes from get firmware state mailbox command |
861 | */ | 871 | */ |
@@ -1841,9 +1851,6 @@ typedef struct fc_port { | |||
1841 | uint8_t scan_state; | 1851 | uint8_t scan_state; |
1842 | } fc_port_t; | 1852 | } fc_port_t; |
1843 | 1853 | ||
1844 | #define QLA_FCPORT_SCAN_NONE 0 | ||
1845 | #define QLA_FCPORT_SCAN_FOUND 1 | ||
1846 | |||
1847 | /* | 1854 | /* |
1848 | * Fibre channel port/lun states. | 1855 | * Fibre channel port/lun states. |
1849 | */ | 1856 | */ |
@@ -2533,8 +2540,10 @@ struct req_que { | |||
2533 | uint16_t qos; | 2540 | uint16_t qos; |
2534 | uint16_t vp_idx; | 2541 | uint16_t vp_idx; |
2535 | struct rsp_que *rsp; | 2542 | struct rsp_que *rsp; |
2536 | srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS]; | 2543 | srb_t **outstanding_cmds; |
2537 | uint32_t current_outstanding_cmd; | 2544 | uint32_t current_outstanding_cmd; |
2545 | uint16_t num_outstanding_cmds; | ||
2546 | #define MAX_Q_DEPTH 32 | ||
2538 | int max_q_depth; | 2547 | int max_q_depth; |
2539 | }; | 2548 | }; |
2540 | 2549 | ||
@@ -2557,11 +2566,13 @@ struct qlt_hw_data { | |||
2557 | struct atio *atio_ring_ptr; /* Current address. */ | 2566 | struct atio *atio_ring_ptr; /* Current address. */ |
2558 | uint16_t atio_ring_index; /* Current index. */ | 2567 | uint16_t atio_ring_index; /* Current index. */ |
2559 | uint16_t atio_q_length; | 2568 | uint16_t atio_q_length; |
2569 | uint32_t __iomem *atio_q_in; | ||
2570 | uint32_t __iomem *atio_q_out; | ||
2560 | 2571 | ||
2561 | void *target_lport_ptr; | 2572 | void *target_lport_ptr; |
2562 | struct qla_tgt_func_tmpl *tgt_ops; | 2573 | struct qla_tgt_func_tmpl *tgt_ops; |
2563 | struct qla_tgt *qla_tgt; | 2574 | struct qla_tgt *qla_tgt; |
2564 | struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS]; | 2575 | struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS]; |
2565 | uint16_t current_handle; | 2576 | uint16_t current_handle; |
2566 | 2577 | ||
2567 | struct qla_tgt_vp_map *tgt_vp_map; | 2578 | struct qla_tgt_vp_map *tgt_vp_map; |
@@ -2618,7 +2629,6 @@ struct qla_hw_data { | |||
2618 | uint32_t nic_core_hung:1; | 2629 | uint32_t nic_core_hung:1; |
2619 | 2630 | ||
2620 | uint32_t quiesce_owner:1; | 2631 | uint32_t quiesce_owner:1; |
2621 | uint32_t thermal_supported:1; | ||
2622 | uint32_t nic_core_reset_hdlr_active:1; | 2632 | uint32_t nic_core_reset_hdlr_active:1; |
2623 | uint32_t nic_core_reset_owner:1; | 2633 | uint32_t nic_core_reset_owner:1; |
2624 | uint32_t isp82xx_no_md_cap:1; | 2634 | uint32_t isp82xx_no_md_cap:1; |
@@ -2788,6 +2798,8 @@ struct qla_hw_data { | |||
2788 | #define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) | 2798 | #define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) |
2789 | #define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ | 2799 | #define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ |
2790 | (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) | 2800 | (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) |
2801 | #define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) | ||
2802 | #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) | ||
2791 | 2803 | ||
2792 | /* HBA serial number */ | 2804 | /* HBA serial number */ |
2793 | uint8_t serial0; | 2805 | uint8_t serial0; |
@@ -2870,7 +2882,13 @@ struct qla_hw_data { | |||
2870 | struct completion mbx_cmd_comp; /* Serialize mbx access */ | 2882 | struct completion mbx_cmd_comp; /* Serialize mbx access */ |
2871 | struct completion mbx_intr_comp; /* Used for completion notification */ | 2883 | struct completion mbx_intr_comp; /* Used for completion notification */ |
2872 | struct completion dcbx_comp; /* For set port config notification */ | 2884 | struct completion dcbx_comp; /* For set port config notification */ |
2885 | struct completion lb_portup_comp; /* Used to wait for link up during | ||
2886 | * loopback */ | ||
2887 | #define DCBX_COMP_TIMEOUT 20 | ||
2888 | #define LB_PORTUP_COMP_TIMEOUT 10 | ||
2889 | |||
2873 | int notify_dcbx_comp; | 2890 | int notify_dcbx_comp; |
2891 | int notify_lb_portup_comp; | ||
2874 | struct mutex selflogin_lock; | 2892 | struct mutex selflogin_lock; |
2875 | 2893 | ||
2876 | /* Basic firmware related information. */ | 2894 | /* Basic firmware related information. */ |
@@ -2887,6 +2905,7 @@ struct qla_hw_data { | |||
2887 | #define RISC_START_ADDRESS_2300 0x800 | 2905 | #define RISC_START_ADDRESS_2300 0x800 |
2888 | #define RISC_START_ADDRESS_2400 0x100000 | 2906 | #define RISC_START_ADDRESS_2400 0x100000 |
2889 | uint16_t fw_xcb_count; | 2907 | uint16_t fw_xcb_count; |
2908 | uint16_t fw_iocb_count; | ||
2890 | 2909 | ||
2891 | uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ | 2910 | uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ |
2892 | uint8_t fw_seriallink_options[4]; | 2911 | uint8_t fw_seriallink_options[4]; |
@@ -3056,7 +3075,16 @@ struct qla_hw_data { | |||
3056 | struct work_struct idc_state_handler; | 3075 | struct work_struct idc_state_handler; |
3057 | struct work_struct nic_core_unrecoverable; | 3076 | struct work_struct nic_core_unrecoverable; |
3058 | 3077 | ||
3078 | #define HOST_QUEUE_RAMPDOWN_INTERVAL (60 * HZ) | ||
3079 | #define HOST_QUEUE_RAMPUP_INTERVAL (30 * HZ) | ||
3080 | unsigned long host_last_rampdown_time; | ||
3081 | unsigned long host_last_rampup_time; | ||
3082 | int cfg_lun_q_depth; | ||
3083 | |||
3059 | struct qlt_hw_data tgt; | 3084 | struct qlt_hw_data tgt; |
3085 | uint16_t thermal_support; | ||
3086 | #define THERMAL_SUPPORT_I2C BIT_0 | ||
3087 | #define THERMAL_SUPPORT_ISP BIT_1 | ||
3060 | }; | 3088 | }; |
3061 | 3089 | ||
3062 | /* | 3090 | /* |
@@ -3115,6 +3143,8 @@ typedef struct scsi_qla_host { | |||
3115 | #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ | 3143 | #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ |
3116 | #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ | 3144 | #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ |
3117 | #define SCR_PENDING 21 /* SCR in target mode */ | 3145 | #define SCR_PENDING 21 /* SCR in target mode */ |
3146 | #define HOST_RAMP_DOWN_QUEUE_DEPTH 22 | ||
3147 | #define HOST_RAMP_UP_QUEUE_DEPTH 23 | ||
3118 | 3148 | ||
3119 | uint32_t device_flags; | 3149 | uint32_t device_flags; |
3120 | #define SWITCH_FOUND BIT_0 | 3150 | #define SWITCH_FOUND BIT_0 |
@@ -3248,8 +3278,6 @@ struct qla_tgt_vp_map { | |||
3248 | 3278 | ||
3249 | #define NVRAM_DELAY() udelay(10) | 3279 | #define NVRAM_DELAY() udelay(10) |
3250 | 3280 | ||
3251 | #define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS+1) | ||
3252 | |||
3253 | /* | 3281 | /* |
3254 | * Flash support definitions | 3282 | * Flash support definitions |
3255 | */ | 3283 | */ |
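Note: the qla_def.h hunk carries the structural changes the rest of the qla2xxx diff builds on: the fixed MAX_OUTSTANDING_COMMANDS array in struct req_que becomes a dynamically sized srb_t ** plus num_outstanding_cmds (with DEFAULT/MIN constants, and INVALID_HANDLE dropped since it was derived from the old constant); the ATIO queue gets its own in/out pointers and accessor macros; the dcbx/lb_portup timeouts and notify_lb_portup_comp flag back the loopback rework above; thermal_supported:1 gives way to a thermal_support bitmask with separate I2C and ISP bits; a QLA2XXX_MANUFACTURER string is introduced for qla_gs.c below; and the host_last_rampdown/rampup timestamps, cfg_lun_q_depth, and HOST_RAMP_* dpc flags support queue-depth ramping handled elsewhere in the patch. A hypothetical userspace sketch of the rate-limit pattern those timestamp fields and *_INTERVAL constants suggest:

    #include <stdio.h>
    #include <stdbool.h>
    #include <time.h>

    /* Sketch only: allow a queue-depth ramp-down at most once per interval,
     * the way a last-event timestamp plus an interval constant implies. */
    struct ramp_state { time_t last_rampdown; };

    static bool rampdown_allowed(struct ramp_state *s, int interval_sec)
    {
        time_t now = time(NULL);

        if (now - s->last_rampdown < interval_sec)
            return false;               /* too soon since the last ramp-down */
        s->last_rampdown = now;
        return true;
    }

    int main(void)
    {
        struct ramp_state s = { .last_rampdown = 0 };

        printf("first request:   %s\n", rampdown_allowed(&s, 60) ? "ok" : "deferred");
        printf("immediate retry: %s\n", rampdown_allowed(&s, 60) ? "ok" : "deferred");
        return 0;
    }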
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 706c4f7bc7c9..792a29294b62 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index be6d61a89edc..1ac2b0e3a0e1 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -300,7 +300,8 @@ struct init_cb_24xx { | |||
300 | uint32_t prio_request_q_address[2]; | 300 | uint32_t prio_request_q_address[2]; |
301 | 301 | ||
302 | uint16_t msix; | 302 | uint16_t msix; |
303 | uint8_t reserved_2[6]; | 303 | uint16_t msix_atio; |
304 | uint8_t reserved_2[4]; | ||
304 | 305 | ||
305 | uint16_t atio_q_inpointer; | 306 | uint16_t atio_q_inpointer; |
306 | uint16_t atio_q_length; | 307 | uint16_t atio_q_length; |
@@ -1387,9 +1388,7 @@ struct qla_flt_header { | |||
1387 | #define FLT_REG_FCP_PRIO_0 0x87 | 1388 | #define FLT_REG_FCP_PRIO_0 0x87 |
1388 | #define FLT_REG_FCP_PRIO_1 0x88 | 1389 | #define FLT_REG_FCP_PRIO_1 0x88 |
1389 | #define FLT_REG_FCOE_FW 0xA4 | 1390 | #define FLT_REG_FCOE_FW 0xA4 |
1390 | #define FLT_REG_FCOE_VPD_0 0xA9 | ||
1391 | #define FLT_REG_FCOE_NVRAM_0 0xAA | 1391 | #define FLT_REG_FCOE_NVRAM_0 0xAA |
1392 | #define FLT_REG_FCOE_VPD_1 0xAB | ||
1393 | #define FLT_REG_FCOE_NVRAM_1 0xAC | 1392 | #define FLT_REG_FCOE_NVRAM_1 0xAC |
1394 | 1393 | ||
1395 | struct qla_flt_region { | 1394 | struct qla_flt_region { |
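Note: in the firmware interface header, the 24xx init control block carves a msix_atio field out of two of its reserved bytes (pairing with the IS_ATIO_MSIX_CAPABLE() macro added to qla_def.h, presumably so ISP83xx parts can steer ATIO interrupts to a dedicated MSI-X vector), and the two FCoE VPD flash-layout regions are dropped. Carving a field out of a reserved area must leave the structure size and later offsets untouched; a hypothetical compile-time check for that kind of edit:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch only: made-up miniatures of the init-CB fragment, before and
     * after msix_atio is taken from the reserved bytes. */
    struct icb_before {
        uint16_t msix;
        uint8_t  reserved_2[6];
        uint16_t atio_q_inpointer;
    };

    struct icb_after {
        uint16_t msix;
        uint16_t msix_atio;
        uint8_t  reserved_2[4];
        uint16_t atio_q_inpointer;
    };

    int main(void)
    {
        _Static_assert(sizeof(struct icb_before) == sizeof(struct icb_after),
                       "layout change altered the structure size");
        printf("atio_q_inpointer offset: %zu before, %zu after\n",
               offsetof(struct icb_before, atio_q_inpointer),
               offsetof(struct icb_after, atio_q_inpointer));
        return 0;
    }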
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 2411d1a12b26..eb3ca21a7f17 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -55,7 +55,7 @@ extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); | |||
55 | extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); | 55 | extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); |
56 | extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); | 56 | extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); |
57 | 57 | ||
58 | extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *); | 58 | extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *); |
59 | 59 | ||
60 | extern void qla84xx_put_chip(struct scsi_qla_host *); | 60 | extern void qla84xx_put_chip(struct scsi_qla_host *); |
61 | 61 | ||
@@ -84,6 +84,9 @@ extern int qla83xx_nic_core_reset(scsi_qla_host_t *); | |||
84 | extern void qla83xx_reset_ownership(scsi_qla_host_t *); | 84 | extern void qla83xx_reset_ownership(scsi_qla_host_t *); |
85 | extern int qla2xxx_mctp_dump(scsi_qla_host_t *); | 85 | extern int qla2xxx_mctp_dump(scsi_qla_host_t *); |
86 | 86 | ||
87 | extern int | ||
88 | qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *); | ||
89 | |||
87 | /* | 90 | /* |
88 | * Global Data in qla_os.c source file. | 91 | * Global Data in qla_os.c source file. |
89 | */ | 92 | */ |
@@ -94,6 +97,7 @@ extern int qlport_down_retry; | |||
94 | extern int ql2xplogiabsentdevice; | 97 | extern int ql2xplogiabsentdevice; |
95 | extern int ql2xloginretrycount; | 98 | extern int ql2xloginretrycount; |
96 | extern int ql2xfdmienable; | 99 | extern int ql2xfdmienable; |
100 | extern int ql2xmaxqdepth; | ||
97 | extern int ql2xallocfwdump; | 101 | extern int ql2xallocfwdump; |
98 | extern int ql2xextended_error_logging; | 102 | extern int ql2xextended_error_logging; |
99 | extern int ql2xiidmaenable; | 103 | extern int ql2xiidmaenable; |
@@ -278,6 +282,9 @@ extern int | |||
278 | qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t); | 282 | qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t); |
279 | 283 | ||
280 | extern int | 284 | extern int |
285 | qla24xx_link_initialize(scsi_qla_host_t *); | ||
286 | |||
287 | extern int | ||
281 | qla2x00_lip_reset(scsi_qla_host_t *); | 288 | qla2x00_lip_reset(scsi_qla_host_t *); |
282 | 289 | ||
283 | extern int | 290 | extern int |
@@ -351,6 +358,9 @@ extern int | |||
351 | qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); | 358 | qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); |
352 | 359 | ||
353 | extern int | 360 | extern int |
361 | qla2x00_set_driver_version(scsi_qla_host_t *, char *); | ||
362 | |||
363 | extern int | ||
354 | qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, | 364 | qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, |
355 | uint16_t, uint16_t, uint16_t, uint16_t); | 365 | uint16_t, uint16_t, uint16_t, uint16_t); |
356 | 366 | ||
@@ -436,6 +446,7 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, | |||
436 | uint32_t); | 446 | uint32_t); |
437 | extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, | 447 | extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, |
438 | uint32_t); | 448 | uint32_t); |
449 | extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t); | ||
439 | 450 | ||
440 | extern int qla2x00_beacon_on(struct scsi_qla_host *); | 451 | extern int qla2x00_beacon_on(struct scsi_qla_host *); |
441 | extern int qla2x00_beacon_off(struct scsi_qla_host *); | 452 | extern int qla2x00_beacon_off(struct scsi_qla_host *); |
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 01efc0e9cc36..9b455250c101 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -1328,8 +1328,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) | |||
1328 | /* Manufacturer. */ | 1328 | /* Manufacturer. */ |
1329 | eiter = (struct ct_fdmi_hba_attr *) (entries + size); | 1329 | eiter = (struct ct_fdmi_hba_attr *) (entries + size); |
1330 | eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER); | 1330 | eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER); |
1331 | strcpy(eiter->a.manufacturer, "QLogic Corporation"); | 1331 | alen = strlen(QLA2XXX_MANUFACTURER); |
1332 | alen = strlen(eiter->a.manufacturer); | 1332 | strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1); |
1333 | alen += (alen & 3) ? (4 - (alen & 3)) : 4; | 1333 | alen += (alen & 3) ? (4 - (alen & 3)) : 4; |
1334 | eiter->len = cpu_to_be16(4 + alen); | 1334 | eiter->len = cpu_to_be16(4 + alen); |
1335 | size += 4 + alen; | 1335 | size += 4 + alen; |
@@ -1649,8 +1649,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) | |||
1649 | /* OS device name. */ | 1649 | /* OS device name. */ |
1650 | eiter = (struct ct_fdmi_port_attr *) (entries + size); | 1650 | eiter = (struct ct_fdmi_port_attr *) (entries + size); |
1651 | eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); | 1651 | eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); |
1652 | strcpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME); | 1652 | alen = strlen(QLA2XXX_DRIVER_NAME); |
1653 | alen = strlen(eiter->a.os_dev_name); | 1653 | strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1); |
1654 | alen += (alen & 3) ? (4 - (alen & 3)) : 4; | 1654 | alen += (alen & 3) ? (4 - (alen & 3)) : 4; |
1655 | eiter->len = cpu_to_be16(4 + alen); | 1655 | eiter->len = cpu_to_be16(4 + alen); |
1656 | size += 4 + alen; | 1656 | size += 4 + alen; |
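Note: the two FDMI hunks replace strcpy() into fixed attribute fields with strncpy() bounded by the known source length plus its terminator, and the manufacturer string now comes from the QLA2XXX_MANUFACTURER define introduced in qla_def.h rather than a repeated literal. Since the bound is computed from a constant string the copy is equivalent; the gain is mainly defensive style and a single definition of the vendor name. A hypothetical userspace sketch of the bounded copy plus the 4-byte length padding the FDMI code applies afterwards:

    #include <stdio.h>
    #include <string.h>

    #define MANUFACTURER "QLogic Corporation"

    int main(void)
    {
        char field[64];
        size_t alen = strlen(MANUFACTURER);

        /* Sketch only: bounded copy (includes the NUL), then round the
         * length up to a multiple of 4, adding 4 if already aligned. */
        strncpy(field, MANUFACTURER, alen + 1);
        alen += (alen & 3) ? (4 - (alen & 3)) : 4;

        printf("attribute '%s', padded length %zu\n", field, alen);
        return 0;
    }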
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 563eee3fa924..edf4d14a1335 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -70,9 +70,7 @@ qla2x00_sp_free(void *data, void *ptr) | |||
70 | struct scsi_qla_host *vha = (scsi_qla_host_t *)data; | 70 | struct scsi_qla_host *vha = (scsi_qla_host_t *)data; |
71 | 71 | ||
72 | del_timer(&iocb->timer); | 72 | del_timer(&iocb->timer); |
73 | mempool_free(sp, vha->hw->srb_mempool); | 73 | qla2x00_rel_sp(vha, sp); |
74 | |||
75 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
76 | } | 74 | } |
77 | 75 | ||
78 | /* Asynchronous Login/Logout Routines -------------------------------------- */ | 76 | /* Asynchronous Login/Logout Routines -------------------------------------- */ |
@@ -525,7 +523,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
525 | vha->flags.reset_active = 0; | 523 | vha->flags.reset_active = 0; |
526 | ha->flags.pci_channel_io_perm_failure = 0; | 524 | ha->flags.pci_channel_io_perm_failure = 0; |
527 | ha->flags.eeh_busy = 0; | 525 | ha->flags.eeh_busy = 0; |
528 | ha->flags.thermal_supported = 1; | 526 | ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP; |
529 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); | 527 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); |
530 | atomic_set(&vha->loop_state, LOOP_DOWN); | 528 | atomic_set(&vha->loop_state, LOOP_DOWN); |
531 | vha->device_flags = DFLG_NO_CABLE; | 529 | vha->device_flags = DFLG_NO_CABLE; |
@@ -621,6 +619,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
621 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) | 619 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) |
622 | qla24xx_read_fcp_prio_cfg(vha); | 620 | qla24xx_read_fcp_prio_cfg(vha); |
623 | 621 | ||
622 | qla2x00_set_driver_version(vha, QLA2XXX_VERSION); | ||
623 | |||
624 | return (rval); | 624 | return (rval); |
625 | } | 625 | } |
626 | 626 | ||
@@ -1559,6 +1559,47 @@ done: | |||
1559 | return rval; | 1559 | return rval; |
1560 | } | 1560 | } |
1561 | 1561 | ||
1562 | int | ||
1563 | qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) | ||
1564 | { | ||
1565 | /* Don't try to reallocate the array */ | ||
1566 | if (req->outstanding_cmds) | ||
1567 | return QLA_SUCCESS; | ||
1568 | |||
1569 | if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase && | ||
1570 | (ql2xmultique_tag || ql2xmaxqueues > 1))) | ||
1571 | req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS; | ||
1572 | else { | ||
1573 | if (ha->fw_xcb_count <= ha->fw_iocb_count) | ||
1574 | req->num_outstanding_cmds = ha->fw_xcb_count; | ||
1575 | else | ||
1576 | req->num_outstanding_cmds = ha->fw_iocb_count; | ||
1577 | } | ||
1578 | |||
1579 | req->outstanding_cmds = kzalloc(sizeof(srb_t *) * | ||
1580 | req->num_outstanding_cmds, GFP_KERNEL); | ||
1581 | |||
1582 | if (!req->outstanding_cmds) { | ||
1583 | /* | ||
1584 | * Try to allocate a minimal size just so we can get through | ||
1585 | * initialization. | ||
1586 | */ | ||
1587 | req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; | ||
1588 | req->outstanding_cmds = kzalloc(sizeof(srb_t *) * | ||
1589 | req->num_outstanding_cmds, GFP_KERNEL); | ||
1590 | |||
1591 | if (!req->outstanding_cmds) { | ||
1592 | ql_log(ql_log_fatal, NULL, 0x0126, | ||
1593 | "Failed to allocate memory for " | ||
1594 | "outstanding_cmds for req_que %p.\n", req); | ||
1595 | req->num_outstanding_cmds = 0; | ||
1596 | return QLA_FUNCTION_FAILED; | ||
1597 | } | ||
1598 | } | ||
1599 | |||
1600 | return QLA_SUCCESS; | ||
1601 | } | ||
1602 | |||
1562 | /** | 1603 | /** |
1563 | * qla2x00_setup_chip() - Load and start RISC firmware. | 1604 | * qla2x00_setup_chip() - Load and start RISC firmware. |
1564 | * @ha: HA context | 1605 | * @ha: HA context |
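
The new qla2x00_alloc_outstanding_cmds() above sizes the per-queue command array from the firmware exchange/IOCB counts (or a default when those counts do not apply) and, if the full-size allocation fails, retries with a minimal size so initialization can still complete. A standalone sketch of that size-then-fallback pattern; the constants, struct and function names here are stand-ins, not the driver's:

    #include <stdlib.h>

    #define DEFAULT_SLOTS 1024   /* stand-in for DEFAULT_OUTSTANDING_COMMANDS */
    #define MIN_SLOTS      128   /* stand-in for MIN_OUTSTANDING_COMMANDS */

    struct queue {
        void   **slots;          /* like req->outstanding_cmds */
        unsigned num_slots;      /* like req->num_outstanding_cmds */
    };

    static int alloc_slots(struct queue *q, unsigned fw_xcb, unsigned fw_iocb,
                           int use_fw_counts)
    {
        if (q->slots)
            return 0;            /* never reallocate an existing array */

        q->num_slots = use_fw_counts ?
            (fw_xcb <= fw_iocb ? fw_xcb : fw_iocb) : DEFAULT_SLOTS;

        q->slots = calloc(q->num_slots, sizeof(*q->slots));
        if (!q->slots) {
            /* fall back to a minimal array so init can still proceed */
            q->num_slots = MIN_SLOTS;
            q->slots = calloc(q->num_slots, sizeof(*q->slots));
            if (!q->slots) {
                q->num_slots = 0;
                return -1;
            }
        }
        return 0;
    }
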
@@ -1628,9 +1669,18 @@ enable_82xx_npiv: | |||
1628 | MIN_MULTI_ID_FABRIC - 1; | 1669 | MIN_MULTI_ID_FABRIC - 1; |
1629 | } | 1670 | } |
1630 | qla2x00_get_resource_cnts(vha, NULL, | 1671 | qla2x00_get_resource_cnts(vha, NULL, |
1631 | &ha->fw_xcb_count, NULL, NULL, | 1672 | &ha->fw_xcb_count, NULL, &ha->fw_iocb_count, |
1632 | &ha->max_npiv_vports, NULL); | 1673 | &ha->max_npiv_vports, NULL); |
1633 | 1674 | ||
1675 | /* | ||
1676 | * Allocate the array of outstanding commands | ||
1677 | * now that we know the firmware resources. | ||
1678 | */ | ||
1679 | rval = qla2x00_alloc_outstanding_cmds(ha, | ||
1680 | vha->req); | ||
1681 | if (rval != QLA_SUCCESS) | ||
1682 | goto failed; | ||
1683 | |||
1634 | if (!fw_major_version && ql2xallocfwdump | 1684 | if (!fw_major_version && ql2xallocfwdump |
1635 | && !IS_QLA82XX(ha)) | 1685 | && !IS_QLA82XX(ha)) |
1636 | qla2x00_alloc_fw_dump(vha); | 1686 | qla2x00_alloc_fw_dump(vha); |
@@ -1914,7 +1964,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha) | |||
1914 | WRT_REG_DWORD(®->isp24.rsp_q_in, 0); | 1964 | WRT_REG_DWORD(®->isp24.rsp_q_in, 0); |
1915 | WRT_REG_DWORD(®->isp24.rsp_q_out, 0); | 1965 | WRT_REG_DWORD(®->isp24.rsp_q_out, 0); |
1916 | } | 1966 | } |
1917 | qlt_24xx_config_rings(vha, reg); | 1967 | qlt_24xx_config_rings(vha); |
1918 | 1968 | ||
1919 | /* PCI posting */ | 1969 | /* PCI posting */ |
1920 | RD_REG_DWORD(&ioreg->hccr); | 1970 | RD_REG_DWORD(&ioreg->hccr); |
@@ -1948,7 +1998,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
1948 | req = ha->req_q_map[que]; | 1998 | req = ha->req_q_map[que]; |
1949 | if (!req) | 1999 | if (!req) |
1950 | continue; | 2000 | continue; |
1951 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) | 2001 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) |
1952 | req->outstanding_cmds[cnt] = NULL; | 2002 | req->outstanding_cmds[cnt] = NULL; |
1953 | 2003 | ||
1954 | req->current_outstanding_cmd = 1; | 2004 | req->current_outstanding_cmd = 1; |
@@ -2157,6 +2207,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) | |||
2157 | char connect_type[22]; | 2207 | char connect_type[22]; |
2158 | struct qla_hw_data *ha = vha->hw; | 2208 | struct qla_hw_data *ha = vha->hw; |
2159 | unsigned long flags; | 2209 | unsigned long flags; |
2210 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | ||
2160 | 2211 | ||
2161 | /* Get host addresses. */ | 2212 | /* Get host addresses. */ |
2162 | rval = qla2x00_get_adapter_id(vha, | 2213 | rval = qla2x00_get_adapter_id(vha, |
@@ -2170,6 +2221,13 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) | |||
2170 | } else { | 2221 | } else { |
2171 | ql_log(ql_log_warn, vha, 0x2009, | 2222 | ql_log(ql_log_warn, vha, 0x2009, |
2172 | "Unable to get host loop ID.\n"); | 2223 | "Unable to get host loop ID.\n"); |
2224 | if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) && | ||
2225 | (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) { | ||
2226 | ql_log(ql_log_warn, vha, 0x1151, | ||
2227 | "Doing link init.\n"); | ||
2228 | if (qla24xx_link_initialize(vha) == QLA_SUCCESS) | ||
2229 | return rval; | ||
2230 | } | ||
2173 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 2231 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
2174 | } | 2232 | } |
2175 | return (rval); | 2233 | return (rval); |
@@ -2690,7 +2748,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) | |||
2690 | fcport->loop_id = FC_NO_LOOP_ID; | 2748 | fcport->loop_id = FC_NO_LOOP_ID; |
2691 | qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); | 2749 | qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); |
2692 | fcport->supported_classes = FC_COS_UNSPECIFIED; | 2750 | fcport->supported_classes = FC_COS_UNSPECIFIED; |
2693 | fcport->scan_state = QLA_FCPORT_SCAN_NONE; | ||
2694 | 2751 | ||
2695 | return fcport; | 2752 | return fcport; |
2696 | } | 2753 | } |
@@ -3103,7 +3160,7 @@ static int | |||
3103 | qla2x00_configure_fabric(scsi_qla_host_t *vha) | 3160 | qla2x00_configure_fabric(scsi_qla_host_t *vha) |
3104 | { | 3161 | { |
3105 | int rval; | 3162 | int rval; |
3106 | fc_port_t *fcport; | 3163 | fc_port_t *fcport, *fcptemp; |
3107 | uint16_t next_loopid; | 3164 | uint16_t next_loopid; |
3108 | uint16_t mb[MAILBOX_REGISTER_COUNT]; | 3165 | uint16_t mb[MAILBOX_REGISTER_COUNT]; |
3109 | uint16_t loop_id; | 3166 | uint16_t loop_id; |
@@ -3141,7 +3198,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
3141 | 0xfc, mb, BIT_1|BIT_0); | 3198 | 0xfc, mb, BIT_1|BIT_0); |
3142 | if (rval != QLA_SUCCESS) { | 3199 | if (rval != QLA_SUCCESS) { |
3143 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); | 3200 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); |
3144 | break; | 3201 | return rval; |
3145 | } | 3202 | } |
3146 | if (mb[0] != MBS_COMMAND_COMPLETE) { | 3203 | if (mb[0] != MBS_COMMAND_COMPLETE) { |
3147 | ql_dbg(ql_dbg_disc, vha, 0x2042, | 3204 | ql_dbg(ql_dbg_disc, vha, 0x2042, |
@@ -3173,16 +3230,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
3173 | } | 3230 | } |
3174 | } | 3231 | } |
3175 | 3232 | ||
3233 | #define QLA_FCPORT_SCAN 1 | ||
3234 | #define QLA_FCPORT_FOUND 2 | ||
3235 | |||
3236 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | ||
3237 | fcport->scan_state = QLA_FCPORT_SCAN; | ||
3238 | } | ||
3239 | |||
3176 | rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); | 3240 | rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); |
3177 | if (rval != QLA_SUCCESS) | 3241 | if (rval != QLA_SUCCESS) |
3178 | break; | 3242 | break; |
3179 | 3243 | ||
3180 | /* Add new ports to existing port list */ | 3244 | /* |
3181 | list_splice_tail_init(&new_fcports, &vha->vp_fcports); | 3245 | * Logout all previous fabric devices marked lost, except |
3182 | 3246 | * FCP2 devices. | |
3183 | /* Starting free loop ID. */ | 3247 | */ |
3184 | next_loopid = ha->min_external_loopid; | ||
3185 | |||
3186 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | 3248 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
3187 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) | 3249 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) |
3188 | break; | 3250 | break; |
@@ -3190,8 +3252,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
3190 | if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) | 3252 | if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) |
3191 | continue; | 3253 | continue; |
3192 | 3254 | ||
3193 | /* Logout lost/gone fabric devices (non-FCP2) */ | 3255 | if (fcport->scan_state == QLA_FCPORT_SCAN && |
3194 | if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND && | ||
3195 | atomic_read(&fcport->state) == FCS_ONLINE) { | 3256 | atomic_read(&fcport->state) == FCS_ONLINE) { |
3196 | qla2x00_mark_device_lost(vha, fcport, | 3257 | qla2x00_mark_device_lost(vha, fcport, |
3197 | ql2xplogiabsentdevice, 0); | 3258 | ql2xplogiabsentdevice, 0); |
@@ -3204,30 +3265,74 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
3204 | fcport->d_id.b.domain, | 3265 | fcport->d_id.b.domain, |
3205 | fcport->d_id.b.area, | 3266 | fcport->d_id.b.area, |
3206 | fcport->d_id.b.al_pa); | 3267 | fcport->d_id.b.al_pa); |
3268 | fcport->loop_id = FC_NO_LOOP_ID; | ||
3207 | } | 3269 | } |
3208 | continue; | ||
3209 | } | 3270 | } |
3210 | fcport->scan_state = QLA_FCPORT_SCAN_NONE; | 3271 | } |
3211 | 3272 | ||
3212 | /* Login fabric devices that need a login */ | 3273 | /* Starting free loop ID. */ |
3213 | if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 && | 3274 | next_loopid = ha->min_external_loopid; |
3214 | atomic_read(&vha->loop_down_timer) == 0) { | 3275 | |
3215 | if (fcport->loop_id == FC_NO_LOOP_ID) { | 3276 | /* |
3216 | fcport->loop_id = next_loopid; | 3277 | * Scan through our port list and login entries that need to be |
3217 | rval = qla2x00_find_new_loop_id( | 3278 | * logged in. |
3218 | base_vha, fcport); | 3279 | */ |
3219 | if (rval != QLA_SUCCESS) { | 3280 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
3220 | /* Ran out of IDs to use */ | 3281 | if (atomic_read(&vha->loop_down_timer) || |
3221 | continue; | 3282 | test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) |
3222 | } | 3283 | break; |
3284 | |||
3285 | if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || | ||
3286 | (fcport->flags & FCF_LOGIN_NEEDED) == 0) | ||
3287 | continue; | ||
3288 | |||
3289 | if (fcport->loop_id == FC_NO_LOOP_ID) { | ||
3290 | fcport->loop_id = next_loopid; | ||
3291 | rval = qla2x00_find_new_loop_id( | ||
3292 | base_vha, fcport); | ||
3293 | if (rval != QLA_SUCCESS) { | ||
3294 | /* Ran out of IDs to use */ | ||
3295 | break; | ||
3223 | } | 3296 | } |
3224 | } | 3297 | } |
3298 | /* Login and update database */ | ||
3299 | qla2x00_fabric_dev_login(vha, fcport, &next_loopid); | ||
3300 | } | ||
3301 | |||
3302 | /* Exit if out of loop IDs. */ | ||
3303 | if (rval != QLA_SUCCESS) { | ||
3304 | break; | ||
3305 | } | ||
3306 | |||
3307 | /* | ||
3308 | * Login and add the new devices to our port list. | ||
3309 | */ | ||
3310 | list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { | ||
3311 | if (atomic_read(&vha->loop_down_timer) || | ||
3312 | test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) | ||
3313 | break; | ||
3314 | |||
3315 | /* Find a new loop ID to use. */ | ||
3316 | fcport->loop_id = next_loopid; | ||
3317 | rval = qla2x00_find_new_loop_id(base_vha, fcport); | ||
3318 | if (rval != QLA_SUCCESS) { | ||
3319 | /* Ran out of IDs to use */ | ||
3320 | break; | ||
3321 | } | ||
3225 | 3322 | ||
3226 | /* Login and update database */ | 3323 | /* Login and update database */ |
3227 | qla2x00_fabric_dev_login(vha, fcport, &next_loopid); | 3324 | qla2x00_fabric_dev_login(vha, fcport, &next_loopid); |
3325 | |||
3326 | list_move_tail(&fcport->list, &vha->vp_fcports); | ||
3228 | } | 3327 | } |
3229 | } while (0); | 3328 | } while (0); |
3230 | 3329 | ||
3330 | /* Free all new device structures not processed. */ | ||
3331 | list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { | ||
3332 | list_del(&fcport->list); | ||
3333 | kfree(fcport); | ||
3334 | } | ||
3335 | |||
3231 | if (rval) { | 3336 | if (rval) { |
3232 | ql_dbg(ql_dbg_disc, vha, 0x2068, | 3337 | ql_dbg(ql_dbg_disc, vha, 0x2068, |
3233 | "Configure fabric error exit rval=%d.\n", rval); | 3338 | "Configure fabric error exit rval=%d.\n", rval); |
@@ -3263,8 +3368,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3263 | int first_dev, last_dev; | 3368 | int first_dev, last_dev; |
3264 | port_id_t wrap = {}, nxt_d_id; | 3369 | port_id_t wrap = {}, nxt_d_id; |
3265 | struct qla_hw_data *ha = vha->hw; | 3370 | struct qla_hw_data *ha = vha->hw; |
3266 | struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); | 3371 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); |
3267 | struct scsi_qla_host *tvp; | ||
3268 | 3372 | ||
3269 | rval = QLA_SUCCESS; | 3373 | rval = QLA_SUCCESS; |
3270 | 3374 | ||
@@ -3377,22 +3481,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3377 | continue; | 3481 | continue; |
3378 | 3482 | ||
3379 | /* Bypass virtual ports of the same host. */ | 3483 | /* Bypass virtual ports of the same host. */ |
3380 | found = 0; | 3484 | if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24)) |
3381 | if (ha->num_vhosts) { | 3485 | continue; |
3382 | unsigned long flags; | ||
3383 | |||
3384 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
3385 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { | ||
3386 | if (new_fcport->d_id.b24 == vp->d_id.b24) { | ||
3387 | found = 1; | ||
3388 | break; | ||
3389 | } | ||
3390 | } | ||
3391 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
3392 | |||
3393 | if (found) | ||
3394 | continue; | ||
3395 | } | ||
3396 | 3486 | ||
3397 | /* Bypass if same domain and area of adapter. */ | 3487 | /* Bypass if same domain and area of adapter. */ |
3398 | if (((new_fcport->d_id.b24 & 0xffff00) == | 3488 | if (((new_fcport->d_id.b24 & 0xffff00) == |
@@ -3417,7 +3507,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3417 | WWN_SIZE)) | 3507 | WWN_SIZE)) |
3418 | continue; | 3508 | continue; |
3419 | 3509 | ||
3420 | fcport->scan_state = QLA_FCPORT_SCAN_FOUND; | 3510 | fcport->scan_state = QLA_FCPORT_FOUND; |
3421 | 3511 | ||
3422 | found++; | 3512 | found++; |
3423 | 3513 | ||
@@ -5004,7 +5094,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, | |||
5004 | return rval; | 5094 | return rval; |
5005 | } | 5095 | } |
5006 | 5096 | ||
5007 | #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/" | 5097 | #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/" |
5008 | 5098 | ||
5009 | int | 5099 | int |
5010 | qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | 5100 | qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) |
@@ -5529,6 +5619,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) | |||
5529 | if (IS_T10_PI_CAPABLE(ha)) | 5619 | if (IS_T10_PI_CAPABLE(ha)) |
5530 | nv->frame_payload_size &= ~7; | 5620 | nv->frame_payload_size &= ~7; |
5531 | 5621 | ||
5622 | qlt_81xx_config_nvram_stage1(vha, nv); | ||
5623 | |||
5532 | /* Reset Initialization control block */ | 5624 | /* Reset Initialization control block */ |
5533 | memset(icb, 0, ha->init_cb_size); | 5625 | memset(icb, 0, ha->init_cb_size); |
5534 | 5626 | ||
@@ -5569,6 +5661,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) | |||
5569 | qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), | 5661 | qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), |
5570 | "QLE8XXX"); | 5662 | "QLE8XXX"); |
5571 | 5663 | ||
5664 | qlt_81xx_config_nvram_stage2(vha, icb); | ||
5665 | |||
5572 | /* Use alternate WWN? */ | 5666 | /* Use alternate WWN? */ |
5573 | if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { | 5667 | if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { |
5574 | memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); | 5668 | memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); |
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index c0462c04c885..68e2c4afc134 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -198,6 +198,13 @@ done: | |||
198 | } | 198 | } |
199 | 199 | ||
200 | static inline void | 200 | static inline void |
201 | qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp) | ||
202 | { | ||
203 | mempool_free(sp, vha->hw->srb_mempool); | ||
204 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
205 | } | ||
206 | |||
207 | static inline void | ||
201 | qla2x00_init_timer(srb_t *sp, unsigned long tmo) | 208 | qla2x00_init_timer(srb_t *sp, unsigned long tmo) |
202 | { | 209 | { |
203 | init_timer(&sp->u.iocb_cmd.timer); | 210 | init_timer(&sp->u.iocb_cmd.timer); |
@@ -213,3 +220,22 @@ qla2x00_gid_list_size(struct qla_hw_data *ha) | |||
213 | { | 220 | { |
214 | return sizeof(struct gid_list_info) * ha->max_fibre_devices; | 221 | return sizeof(struct gid_list_info) * ha->max_fibre_devices; |
215 | } | 222 | } |
223 | |||
224 | static inline void | ||
225 | qla2x00_do_host_ramp_up(scsi_qla_host_t *vha) | ||
226 | { | ||
227 | if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth) | ||
228 | return; | ||
229 | |||
230 | /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */ | ||
231 | if (time_before(jiffies, (vha->hw->host_last_rampdown_time + | ||
232 | HOST_QUEUE_RAMPDOWN_INTERVAL))) | ||
233 | return; | ||
234 | |||
235 | /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */ | ||
236 | if (time_before(jiffies, (vha->hw->host_last_rampup_time + | ||
237 | HOST_QUEUE_RAMPUP_INTERVAL))) | ||
238 | return; | ||
239 | |||
240 | set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags); | ||
241 | } | ||
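
qla2x00_do_host_ramp_up() only asks the DPC thread for a ramp-up when the configured depth is still below ql2xmaxqdepth and enough time has passed since both the last ramp-down and the last ramp-up; the two time_before() checks act as a simple rate limiter. A userspace approximation of that gating with a wall-clock timer (the interval values below are made up, not the driver's):

    #include <stdbool.h>
    #include <time.h>

    #define RAMPDOWN_HOLDOFF_SEC 60   /* stand-in for HOST_QUEUE_RAMPDOWN_INTERVAL */
    #define RAMPUP_HOLDOFF_SEC   30   /* stand-in for HOST_QUEUE_RAMPUP_INTERVAL */

    struct adapter {
        int    cfg_depth, max_depth;
        time_t last_rampdown, last_rampup;
    };

    /* Return true when a queue-depth ramp-up should be scheduled. */
    static bool should_ramp_up(const struct adapter *a)
    {
        time_t now = time(NULL);

        if (a->cfg_depth >= a->max_depth)
            return false;                               /* already at the cap */
        if (now < a->last_rampdown + RAMPDOWN_HOLDOFF_SEC)
            return false;                               /* too soon after a ramp-down */
        if (now < a->last_rampup + RAMPUP_HOLDOFF_SEC)
            return false;                               /* too soon after a ramp-up */
        return true;
    }
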
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index a481684479c1..d2630317cce8 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -349,14 +349,14 @@ qla2x00_start_scsi(srb_t *sp) | |||
349 | 349 | ||
350 | /* Check for room in outstanding command list. */ | 350 | /* Check for room in outstanding command list. */ |
351 | handle = req->current_outstanding_cmd; | 351 | handle = req->current_outstanding_cmd; |
352 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | 352 | for (index = 1; index < req->num_outstanding_cmds; index++) { |
353 | handle++; | 353 | handle++; |
354 | if (handle == MAX_OUTSTANDING_COMMANDS) | 354 | if (handle == req->num_outstanding_cmds) |
355 | handle = 1; | 355 | handle = 1; |
356 | if (!req->outstanding_cmds[handle]) | 356 | if (!req->outstanding_cmds[handle]) |
357 | break; | 357 | break; |
358 | } | 358 | } |
359 | if (index == MAX_OUTSTANDING_COMMANDS) | 359 | if (index == req->num_outstanding_cmds) |
360 | goto queuing_error; | 360 | goto queuing_error; |
361 | 361 | ||
362 | /* Map the sg table so we have an accurate count of sg entries needed */ | 362 | /* Map the sg table so we have an accurate count of sg entries needed */ |
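
Every start_scsi/alloc_iocbs hunk in this file repeats the same handle-allocation idiom: starting just past current_outstanding_cmd, walk the array circularly (slot 0 is reserved) until a free slot turns up, and give up if the scan wraps all the way around. The change here is only the bound, which is now the per-queue num_outstanding_cmds instead of the global MAX_OUTSTANDING_COMMANDS. The search itself, isolated into a small helper for illustration (not driver code):

    /* Return a free handle in [1, nslots) or 0 if every slot is busy.
     * 'cur' is the last handle handed out; slot 0 is never used. */
    static unsigned int find_free_handle(void *slots[], unsigned int nslots,
                                         unsigned int cur)
    {
        unsigned int index, handle = cur;

        for (index = 1; index < nslots; index++) {
            handle++;
            if (handle == nslots)
                handle = 1;        /* wrap around, skipping reserved slot 0 */
            if (!slots[handle])
                return handle;     /* found a free entry */
        }
        return 0;                  /* scanned a full circle: array is full */
    }
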
@@ -1467,16 +1467,15 @@ qla24xx_start_scsi(srb_t *sp) | |||
1467 | 1467 | ||
1468 | /* Check for room in outstanding command list. */ | 1468 | /* Check for room in outstanding command list. */ |
1469 | handle = req->current_outstanding_cmd; | 1469 | handle = req->current_outstanding_cmd; |
1470 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | 1470 | for (index = 1; index < req->num_outstanding_cmds; index++) { |
1471 | handle++; | 1471 | handle++; |
1472 | if (handle == MAX_OUTSTANDING_COMMANDS) | 1472 | if (handle == req->num_outstanding_cmds) |
1473 | handle = 1; | 1473 | handle = 1; |
1474 | if (!req->outstanding_cmds[handle]) | 1474 | if (!req->outstanding_cmds[handle]) |
1475 | break; | 1475 | break; |
1476 | } | 1476 | } |
1477 | if (index == MAX_OUTSTANDING_COMMANDS) { | 1477 | if (index == req->num_outstanding_cmds) |
1478 | goto queuing_error; | 1478 | goto queuing_error; |
1479 | } | ||
1480 | 1479 | ||
1481 | /* Map the sg table so we have an accurate count of sg entries needed */ | 1480 | /* Map the sg table so we have an accurate count of sg entries needed */ |
1482 | if (scsi_sg_count(cmd)) { | 1481 | if (scsi_sg_count(cmd)) { |
@@ -1641,15 +1640,15 @@ qla24xx_dif_start_scsi(srb_t *sp) | |||
1641 | 1640 | ||
1642 | /* Check for room in outstanding command list. */ | 1641 | /* Check for room in outstanding command list. */ |
1643 | handle = req->current_outstanding_cmd; | 1642 | handle = req->current_outstanding_cmd; |
1644 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | 1643 | for (index = 1; index < req->num_outstanding_cmds; index++) { |
1645 | handle++; | 1644 | handle++; |
1646 | if (handle == MAX_OUTSTANDING_COMMANDS) | 1645 | if (handle == req->num_outstanding_cmds) |
1647 | handle = 1; | 1646 | handle = 1; |
1648 | if (!req->outstanding_cmds[handle]) | 1647 | if (!req->outstanding_cmds[handle]) |
1649 | break; | 1648 | break; |
1650 | } | 1649 | } |
1651 | 1650 | ||
1652 | if (index == MAX_OUTSTANDING_COMMANDS) | 1651 | if (index == req->num_outstanding_cmds) |
1653 | goto queuing_error; | 1652 | goto queuing_error; |
1654 | 1653 | ||
1655 | /* Compute number of required data segments */ | 1654 | /* Compute number of required data segments */ |
@@ -1822,14 +1821,14 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) | |||
1822 | 1821 | ||
1823 | /* Check for room in outstanding command list. */ | 1822 | /* Check for room in outstanding command list. */ |
1824 | handle = req->current_outstanding_cmd; | 1823 | handle = req->current_outstanding_cmd; |
1825 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | 1824 | for (index = 1; index < req->num_outstanding_cmds; index++) { |
1826 | handle++; | 1825 | handle++; |
1827 | if (handle == MAX_OUTSTANDING_COMMANDS) | 1826 | if (handle == req->num_outstanding_cmds) |
1828 | handle = 1; | 1827 | handle = 1; |
1829 | if (!req->outstanding_cmds[handle]) | 1828 | if (!req->outstanding_cmds[handle]) |
1830 | break; | 1829 | break; |
1831 | } | 1830 | } |
1832 | if (index == MAX_OUTSTANDING_COMMANDS) { | 1831 | if (index == req->num_outstanding_cmds) { |
1833 | ql_log(ql_log_warn, vha, 0x700b, | 1832 | ql_log(ql_log_warn, vha, 0x700b, |
1834 | "No room on outstanding cmd array.\n"); | 1833 | "No room on outstanding cmd array.\n"); |
1835 | goto queuing_error; | 1834 | goto queuing_error; |
@@ -2263,14 +2262,14 @@ qla82xx_start_scsi(srb_t *sp) | |||
2263 | 2262 | ||
2264 | /* Check for room in outstanding command list. */ | 2263 | /* Check for room in outstanding command list. */ |
2265 | handle = req->current_outstanding_cmd; | 2264 | handle = req->current_outstanding_cmd; |
2266 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | 2265 | for (index = 1; index < req->num_outstanding_cmds; index++) { |
2267 | handle++; | 2266 | handle++; |
2268 | if (handle == MAX_OUTSTANDING_COMMANDS) | 2267 | if (handle == req->num_outstanding_cmds) |
2269 | handle = 1; | 2268 | handle = 1; |
2270 | if (!req->outstanding_cmds[handle]) | 2269 | if (!req->outstanding_cmds[handle]) |
2271 | break; | 2270 | break; |
2272 | } | 2271 | } |
2273 | if (index == MAX_OUTSTANDING_COMMANDS) | 2272 | if (index == req->num_outstanding_cmds) |
2274 | goto queuing_error; | 2273 | goto queuing_error; |
2275 | 2274 | ||
2276 | /* Map the sg table so we have an accurate count of sg entries needed */ | 2275 | /* Map the sg table so we have an accurate count of sg entries needed */ |
@@ -2767,15 +2766,15 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) | |||
2767 | 2766 | ||
2768 | /* Check for room in outstanding command list. */ | 2767 | /* Check for room in outstanding command list. */ |
2769 | handle = req->current_outstanding_cmd; | 2768 | handle = req->current_outstanding_cmd; |
2770 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | 2769 | for (index = 1; index < req->num_outstanding_cmds; index++) { |
2771 | handle++; | 2770 | handle++; |
2772 | if (handle == MAX_OUTSTANDING_COMMANDS) | 2771 | if (handle == req->num_outstanding_cmds) |
2773 | handle = 1; | 2772 | handle = 1; |
2774 | if (!req->outstanding_cmds[handle]) | 2773 | if (!req->outstanding_cmds[handle]) |
2775 | break; | 2774 | break; |
2776 | } | 2775 | } |
2777 | 2776 | ||
2778 | if (index == MAX_OUTSTANDING_COMMANDS) { | 2777 | if (index == req->num_outstanding_cmds) { |
2779 | rval = EXT_STATUS_BUSY; | 2778 | rval = EXT_STATUS_BUSY; |
2780 | goto queuing_error; | 2779 | goto queuing_error; |
2781 | } | 2780 | } |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 873c82014b16..e9dbd74c20d3 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -13,6 +13,8 @@ | |||
13 | #include <scsi/scsi_bsg_fc.h> | 13 | #include <scsi/scsi_bsg_fc.h> |
14 | #include <scsi/scsi_eh.h> | 14 | #include <scsi/scsi_eh.h> |
15 | 15 | ||
16 | #include "qla_target.h" | ||
17 | |||
16 | static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); | 18 | static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); |
17 | static void qla2x00_process_completed_request(struct scsi_qla_host *, | 19 | static void qla2x00_process_completed_request(struct scsi_qla_host *, |
18 | struct req_que *, uint32_t); | 20 | struct req_que *, uint32_t); |
@@ -489,10 +491,37 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) | |||
489 | if (mb[1] & IDC_DEVICE_STATE_CHANGE) { | 491 | if (mb[1] & IDC_DEVICE_STATE_CHANGE) { |
490 | ql_log(ql_log_info, vha, 0x506a, | 492 | ql_log(ql_log_info, vha, 0x506a, |
491 | "IDC Device-State changed = 0x%x.\n", mb[4]); | 493 | "IDC Device-State changed = 0x%x.\n", mb[4]); |
494 | if (ha->flags.nic_core_reset_owner) | ||
495 | return; | ||
492 | qla83xx_schedule_work(vha, MBA_IDC_AEN); | 496 | qla83xx_schedule_work(vha, MBA_IDC_AEN); |
493 | } | 497 | } |
494 | } | 498 | } |
495 | 499 | ||
500 | int | ||
501 | qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) | ||
502 | { | ||
503 | struct qla_hw_data *ha = vha->hw; | ||
504 | scsi_qla_host_t *vp; | ||
505 | uint32_t vp_did; | ||
506 | unsigned long flags; | ||
507 | int ret = 0; | ||
508 | |||
509 | if (!ha->num_vhosts) | ||
510 | return ret; | ||
511 | |||
512 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
513 | list_for_each_entry(vp, &ha->vp_list, list) { | ||
514 | vp_did = vp->d_id.b24; | ||
515 | if (vp_did == rscn_entry) { | ||
516 | ret = 1; | ||
517 | break; | ||
518 | } | ||
519 | } | ||
520 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
521 | |||
522 | return ret; | ||
523 | } | ||
524 | |||
496 | /** | 525 | /** |
497 | * qla2x00_async_event() - Process asynchronous events. | 526 |
498 | * @ha: SCSI driver HA context | 527 | * @ha: SCSI driver HA context |
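
The new qla2x00_is_a_vp_did() helper walks the adapter's vport list under vport_slock and answers whether a 24-bit port ID belongs to one of the host's own virtual ports, so RSCNs and fabric entries for them can be skipped (it replaces the loop that was open-coded in qla2x00_find_all_fabric_devs()). The same check reduced to a plain array lookup, without the locking and with invented types:

    #include <stdint.h>

    struct vport { uint32_t d_id_b24; };   /* 24-bit fabric port ID */

    /* Return 1 if 'did' matches one of this host's own virtual ports. */
    static int is_a_vp_did(const struct vport *vps, unsigned int nvps,
                           uint32_t did)
    {
        unsigned int i;

        for (i = 0; i < nvps; i++)
            if (vps[i].d_id_b24 == did)
                return 1;
        return 0;
    }
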
@@ -899,6 +928,10 @@ skip_rio: | |||
899 | /* Ignore reserved bits from RSCN-payload. */ | 928 | /* Ignore reserved bits from RSCN-payload. */ |
900 | rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; | 929 | rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; |
901 | 930 | ||
931 | /* Skip RSCNs for virtual ports on the same physical port */ | ||
932 | if (qla2x00_is_a_vp_did(vha, rscn_entry)) | ||
933 | break; | ||
934 | |||
902 | atomic_set(&vha->loop_down_timer, 0); | 935 | atomic_set(&vha->loop_down_timer, 0); |
903 | vha->flags.management_server_logged_in = 0; | 936 | vha->flags.management_server_logged_in = 0; |
904 | 937 | ||
@@ -983,14 +1016,25 @@ skip_rio: | |||
983 | mb[1], mb[2], mb[3]); | 1016 | mb[1], mb[2], mb[3]); |
984 | break; | 1017 | break; |
985 | case MBA_IDC_NOTIFY: | 1018 | case MBA_IDC_NOTIFY: |
986 | /* See if we need to quiesce any I/O */ | 1019 | if (IS_QLA8031(vha->hw)) { |
987 | if (IS_QLA8031(vha->hw)) | 1020 | mb[4] = RD_REG_WORD(®24->mailbox4); |
988 | if ((mb[2] & 0x7fff) == MBC_PORT_RESET || | 1021 | if (((mb[2] & 0x7fff) == MBC_PORT_RESET || |
989 | (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) { | 1022 | (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && |
1023 | (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { | ||
990 | set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); | 1024 | set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); |
1025 | /* | ||
1026 | * Extend loop down timer since port is active. | ||
1027 | */ | ||
1028 | if (atomic_read(&vha->loop_state) == LOOP_DOWN) | ||
1029 | atomic_set(&vha->loop_down_timer, | ||
1030 | LOOP_DOWN_TIME); | ||
991 | qla2xxx_wake_dpc(vha); | 1031 | qla2xxx_wake_dpc(vha); |
992 | } | 1032 | } |
1033 | } | ||
993 | case MBA_IDC_COMPLETE: | 1034 | case MBA_IDC_COMPLETE: |
1035 | if (ha->notify_lb_portup_comp) | ||
1036 | complete(&ha->lb_portup_comp); | ||
1037 | /* Fallthru */ | ||
994 | case MBA_IDC_TIME_EXT: | 1038 | case MBA_IDC_TIME_EXT: |
995 | if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) | 1039 | if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) |
996 | qla81xx_idc_event(vha, mb[0], mb[1]); | 1040 | qla81xx_idc_event(vha, mb[0], mb[1]); |
@@ -1029,7 +1073,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, | |||
1029 | struct qla_hw_data *ha = vha->hw; | 1073 | struct qla_hw_data *ha = vha->hw; |
1030 | 1074 | ||
1031 | /* Validate handle. */ | 1075 | /* Validate handle. */ |
1032 | if (index >= MAX_OUTSTANDING_COMMANDS) { | 1076 | if (index >= req->num_outstanding_cmds) { |
1033 | ql_log(ql_log_warn, vha, 0x3014, | 1077 | ql_log(ql_log_warn, vha, 0x3014, |
1034 | "Invalid SCSI command index (%x).\n", index); | 1078 | "Invalid SCSI command index (%x).\n", index); |
1035 | 1079 | ||
@@ -1067,7 +1111,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, | |||
1067 | uint16_t index; | 1111 | uint16_t index; |
1068 | 1112 | ||
1069 | index = LSW(pkt->handle); | 1113 | index = LSW(pkt->handle); |
1070 | if (index >= MAX_OUTSTANDING_COMMANDS) { | 1114 | if (index >= req->num_outstanding_cmds) { |
1071 | ql_log(ql_log_warn, vha, 0x5031, | 1115 | ql_log(ql_log_warn, vha, 0x5031, |
1072 | "Invalid command index (%x).\n", index); | 1116 | "Invalid command index (%x).\n", index); |
1073 | if (IS_QLA82XX(ha)) | 1117 | if (IS_QLA82XX(ha)) |
@@ -1740,7 +1784,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, | |||
1740 | sts24 = (struct sts_entry_24xx *) pkt; | 1784 | sts24 = (struct sts_entry_24xx *) pkt; |
1741 | 1785 | ||
1742 | /* Validate handle. */ | 1786 | /* Validate handle. */ |
1743 | if (index >= MAX_OUTSTANDING_COMMANDS) { | 1787 | if (index >= req->num_outstanding_cmds) { |
1744 | ql_log(ql_log_warn, vha, 0x70af, | 1788 | ql_log(ql_log_warn, vha, 0x70af, |
1745 | "Invalid SCSI completion handle 0x%x.\n", index); | 1789 | "Invalid SCSI completion handle 0x%x.\n", index); |
1746 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 1790 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
@@ -1910,9 +1954,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
1910 | req = ha->req_q_map[que]; | 1954 | req = ha->req_q_map[que]; |
1911 | 1955 | ||
1912 | /* Validate handle. */ | 1956 | /* Validate handle. */ |
1913 | if (handle < MAX_OUTSTANDING_COMMANDS) { | 1957 | if (handle < req->num_outstanding_cmds) |
1914 | sp = req->outstanding_cmds[handle]; | 1958 | sp = req->outstanding_cmds[handle]; |
1915 | } else | 1959 | else |
1916 | sp = NULL; | 1960 | sp = NULL; |
1917 | 1961 | ||
1918 | if (sp == NULL) { | 1962 | if (sp == NULL) { |
@@ -1934,6 +1978,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
1934 | 1978 | ||
1935 | /* Fast path completion. */ | 1979 | /* Fast path completion. */ |
1936 | if (comp_status == CS_COMPLETE && scsi_status == 0) { | 1980 | if (comp_status == CS_COMPLETE && scsi_status == 0) { |
1981 | qla2x00_do_host_ramp_up(vha); | ||
1937 | qla2x00_process_completed_request(vha, req, handle); | 1982 | qla2x00_process_completed_request(vha, req, handle); |
1938 | 1983 | ||
1939 | return; | 1984 | return; |
@@ -2193,6 +2238,9 @@ out: | |||
2193 | cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len, | 2238 | cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len, |
2194 | resid_len, fw_resid_len); | 2239 | resid_len, fw_resid_len); |
2195 | 2240 | ||
2241 | if (!res) | ||
2242 | qla2x00_do_host_ramp_up(vha); | ||
2243 | |||
2196 | if (rsp->status_srb == NULL) | 2244 | if (rsp->status_srb == NULL) |
2197 | sp->done(ha, sp, res); | 2245 | sp->done(ha, sp, res); |
2198 | } | 2246 | } |
@@ -2747,6 +2795,12 @@ static struct qla_init_msix_entry qla82xx_msix_entries[2] = { | |||
2747 | { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, | 2795 | { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, |
2748 | }; | 2796 | }; |
2749 | 2797 | ||
2798 | static struct qla_init_msix_entry qla83xx_msix_entries[3] = { | ||
2799 | { "qla2xxx (default)", qla24xx_msix_default }, | ||
2800 | { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, | ||
2801 | { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, | ||
2802 | }; | ||
2803 | |||
2750 | static void | 2804 | static void |
2751 | qla24xx_disable_msix(struct qla_hw_data *ha) | 2805 | qla24xx_disable_msix(struct qla_hw_data *ha) |
2752 | { | 2806 | { |
@@ -2827,9 +2881,13 @@ msix_failed: | |||
2827 | } | 2881 | } |
2828 | 2882 | ||
2829 | /* Enable MSI-X vectors for the base queue */ | 2883 | /* Enable MSI-X vectors for the base queue */ |
2830 | for (i = 0; i < 2; i++) { | 2884 | for (i = 0; i < ha->msix_count; i++) { |
2831 | qentry = &ha->msix_entries[i]; | 2885 | qentry = &ha->msix_entries[i]; |
2832 | if (IS_QLA82XX(ha)) { | 2886 | if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { |
2887 | ret = request_irq(qentry->vector, | ||
2888 | qla83xx_msix_entries[i].handler, | ||
2889 | 0, qla83xx_msix_entries[i].name, rsp); | ||
2890 | } else if (IS_QLA82XX(ha)) { | ||
2833 | ret = request_irq(qentry->vector, | 2891 | ret = request_irq(qentry->vector, |
2834 | qla82xx_msix_entries[i].handler, | 2892 | qla82xx_msix_entries[i].handler, |
2835 | 0, qla82xx_msix_entries[i].name, rsp); | 2893 | 0, qla82xx_msix_entries[i].name, rsp); |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 68c55eaa318c..186dd59ce4fa 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -900,13 +900,13 @@ qla2x00_abort_command(srb_t *sp) | |||
900 | "Entered %s.\n", __func__); | 900 | "Entered %s.\n", __func__); |
901 | 901 | ||
902 | spin_lock_irqsave(&ha->hardware_lock, flags); | 902 | spin_lock_irqsave(&ha->hardware_lock, flags); |
903 | for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { | 903 | for (handle = 1; handle < req->num_outstanding_cmds; handle++) { |
904 | if (req->outstanding_cmds[handle] == sp) | 904 | if (req->outstanding_cmds[handle] == sp) |
905 | break; | 905 | break; |
906 | } | 906 | } |
907 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 907 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
908 | 908 | ||
909 | if (handle == MAX_OUTSTANDING_COMMANDS) { | 909 | if (handle == req->num_outstanding_cmds) { |
910 | /* command not found */ | 910 | /* command not found */ |
911 | return QLA_FUNCTION_FAILED; | 911 | return QLA_FUNCTION_FAILED; |
912 | } | 912 | } |
@@ -1633,6 +1633,54 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, | |||
1633 | } | 1633 | } |
1634 | 1634 | ||
1635 | /* | 1635 | /* |
1636 | * qla24xx_link_initialize | ||
1637 | * Issue link initialization mailbox command. | ||
1638 | * | ||
1639 | * Input: | ||
1640 | * ha = adapter block pointer. | ||
1641 | * TARGET_QUEUE_LOCK must be released. | ||
1642 | * ADAPTER_STATE_LOCK must be released. | ||
1643 | * | ||
1644 | * Returns: | ||
1645 | * qla2x00 local function return status code. | ||
1646 | * | ||
1647 | * Context: | ||
1648 | * Kernel context. | ||
1649 | */ | ||
1650 | int | ||
1651 | qla24xx_link_initialize(scsi_qla_host_t *vha) | ||
1652 | { | ||
1653 | int rval; | ||
1654 | mbx_cmd_t mc; | ||
1655 | mbx_cmd_t *mcp = &mc; | ||
1656 | |||
1657 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, | ||
1658 | "Entered %s.\n", __func__); | ||
1659 | |||
1660 | if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) | ||
1661 | return QLA_FUNCTION_FAILED; | ||
1662 | |||
1663 | mcp->mb[0] = MBC_LINK_INITIALIZATION; | ||
1664 | mcp->mb[1] = BIT_6|BIT_4; | ||
1665 | mcp->mb[2] = 0; | ||
1666 | mcp->mb[3] = 0; | ||
1667 | mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; | ||
1668 | mcp->in_mb = MBX_0; | ||
1669 | mcp->tov = MBX_TOV_SECONDS; | ||
1670 | mcp->flags = 0; | ||
1671 | rval = qla2x00_mailbox_command(vha, mcp); | ||
1672 | |||
1673 | if (rval != QLA_SUCCESS) { | ||
1674 | ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); | ||
1675 | } else { | ||
1676 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, | ||
1677 | "Done %s.\n", __func__); | ||
1678 | } | ||
1679 | |||
1680 | return rval; | ||
1681 | } | ||
1682 | |||
1683 | /* | ||
1636 | * qla2x00_lip_reset | 1684 | * qla2x00_lip_reset |
1637 | * Issue LIP reset mailbox command. | 1685 | * Issue LIP reset mailbox command. |
1638 | * | 1686 | * |
@@ -2535,12 +2583,12 @@ qla24xx_abort_command(srb_t *sp) | |||
2535 | "Entered %s.\n", __func__); | 2583 | "Entered %s.\n", __func__); |
2536 | 2584 | ||
2537 | spin_lock_irqsave(&ha->hardware_lock, flags); | 2585 | spin_lock_irqsave(&ha->hardware_lock, flags); |
2538 | for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { | 2586 | for (handle = 1; handle < req->num_outstanding_cmds; handle++) { |
2539 | if (req->outstanding_cmds[handle] == sp) | 2587 | if (req->outstanding_cmds[handle] == sp) |
2540 | break; | 2588 | break; |
2541 | } | 2589 | } |
2542 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 2590 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
2543 | if (handle == MAX_OUTSTANDING_COMMANDS) { | 2591 | if (handle == req->num_outstanding_cmds) { |
2544 | /* Command not found. */ | 2592 | /* Command not found. */ |
2545 | return QLA_FUNCTION_FAILED; | 2593 | return QLA_FUNCTION_FAILED; |
2546 | } | 2594 | } |
@@ -3093,6 +3141,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
3093 | struct qla_hw_data *ha = vha->hw; | 3141 | struct qla_hw_data *ha = vha->hw; |
3094 | scsi_qla_host_t *vp; | 3142 | scsi_qla_host_t *vp; |
3095 | unsigned long flags; | 3143 | unsigned long flags; |
3144 | int found; | ||
3096 | 3145 | ||
3097 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, | 3146 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, |
3098 | "Entered %s.\n", __func__); | 3147 | "Entered %s.\n", __func__); |
@@ -3128,13 +3177,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
3128 | return; | 3177 | return; |
3129 | } | 3178 | } |
3130 | 3179 | ||
3180 | found = 0; | ||
3131 | spin_lock_irqsave(&ha->vport_slock, flags); | 3181 | spin_lock_irqsave(&ha->vport_slock, flags); |
3132 | list_for_each_entry(vp, &ha->vp_list, list) | 3182 | list_for_each_entry(vp, &ha->vp_list, list) { |
3133 | if (vp_idx == vp->vp_idx) | 3183 | if (vp_idx == vp->vp_idx) { |
3184 | found = 1; | ||
3134 | break; | 3185 | break; |
3186 | } | ||
3187 | } | ||
3135 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 3188 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
3136 | 3189 | ||
3137 | if (!vp) | 3190 | if (!found) |
3138 | return; | 3191 | return; |
3139 | 3192 | ||
3140 | vp->d_id.b.domain = rptid_entry->port_id[2]; | 3193 | vp->d_id.b.domain = rptid_entry->port_id[2]; |
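
The found-flag change above addresses a common list_for_each_entry() pitfall: after the loop runs to completion the cursor is not NULL, it points at the container wrapped around the list head, so the old `if (!vp)` test could never fire. Tracking an explicit flag (or returning a pointer from a helper) is the usual fix. A generic sketch of the safer shape on an ordinary linked list:

    #include <stddef.h>

    struct node { int id; struct node *next; };

    /* Report success through an explicit flag (or the returned pointer)
     * instead of inspecting the loop cursor after the loop ends. */
    static struct node *find_node(struct node *head, int id, int *found)
    {
        struct node *n;

        *found = 0;
        for (n = head; n != NULL; n = n->next) {
            if (n->id == id) {
                *found = 1;
                return n;
            }
        }
        return NULL;
    }
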
@@ -3814,6 +3867,97 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) | |||
3814 | } | 3867 | } |
3815 | 3868 | ||
3816 | int | 3869 | int |
3870 | qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version) | ||
3871 | { | ||
3872 | int rval; | ||
3873 | mbx_cmd_t mc; | ||
3874 | mbx_cmd_t *mcp = &mc; | ||
3875 | int len; | ||
3876 | uint16_t dwlen; | ||
3877 | uint8_t *str; | ||
3878 | dma_addr_t str_dma; | ||
3879 | struct qla_hw_data *ha = vha->hw; | ||
3880 | |||
3881 | if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha)) | ||
3882 | return QLA_FUNCTION_FAILED; | ||
3883 | |||
3884 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155, | ||
3885 | "Entered %s.\n", __func__); | ||
3886 | |||
3887 | str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); | ||
3888 | if (!str) { | ||
3889 | ql_log(ql_log_warn, vha, 0x1156, | ||
3890 | "Failed to allocate driver version param.\n"); | ||
3891 | return QLA_MEMORY_ALLOC_FAILED; | ||
3892 | } | ||
3893 | |||
3894 | memcpy(str, "\x7\x3\x11\x0", 4); | ||
3895 | dwlen = str[0]; | ||
3896 | len = dwlen * sizeof(uint32_t) - 4; | ||
3897 | memset(str + 4, 0, len); | ||
3898 | if (len > strlen(version)) | ||
3899 | len = strlen(version); | ||
3900 | memcpy(str + 4, version, len); | ||
3901 | |||
3902 | mcp->mb[0] = MBC_SET_RNID_PARAMS; | ||
3903 | mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; | ||
3904 | mcp->mb[2] = MSW(LSD(str_dma)); | ||
3905 | mcp->mb[3] = LSW(LSD(str_dma)); | ||
3906 | mcp->mb[6] = MSW(MSD(str_dma)); | ||
3907 | mcp->mb[7] = LSW(MSD(str_dma)); | ||
3908 | mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | ||
3909 | mcp->in_mb = MBX_0; | ||
3910 | mcp->tov = MBX_TOV_SECONDS; | ||
3911 | mcp->flags = 0; | ||
3912 | rval = qla2x00_mailbox_command(vha, mcp); | ||
3913 | |||
3914 | if (rval != QLA_SUCCESS) { | ||
3915 | ql_dbg(ql_dbg_mbx, vha, 0x1157, | ||
3916 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
3917 | } else { | ||
3918 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158, | ||
3919 | "Done %s.\n", __func__); | ||
3920 | } | ||
3921 | |||
3922 | dma_pool_free(ha->s_dma_pool, str, str_dma); | ||
3923 | |||
3924 | return rval; | ||
3925 | } | ||
3926 | |||
3927 | static int | ||
3928 | qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) | ||
3929 | { | ||
3930 | int rval; | ||
3931 | mbx_cmd_t mc; | ||
3932 | mbx_cmd_t *mcp = &mc; | ||
3933 | |||
3934 | if (!IS_FWI2_CAPABLE(vha->hw)) | ||
3935 | return QLA_FUNCTION_FAILED; | ||
3936 | |||
3937 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, | ||
3938 | "Entered %s.\n", __func__); | ||
3939 | |||
3940 | mcp->mb[0] = MBC_GET_RNID_PARAMS; | ||
3941 | mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; | ||
3942 | mcp->out_mb = MBX_1|MBX_0; | ||
3943 | mcp->in_mb = MBX_1|MBX_0; | ||
3944 | mcp->tov = MBX_TOV_SECONDS; | ||
3945 | mcp->flags = 0; | ||
3946 | rval = qla2x00_mailbox_command(vha, mcp); | ||
3947 | *temp = mcp->mb[1]; | ||
3948 | |||
3949 | if (rval != QLA_SUCCESS) { | ||
3950 | ql_dbg(ql_dbg_mbx, vha, 0x115a, | ||
3951 | "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); | ||
3952 | } else { | ||
3953 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, | ||
3954 | "Done %s.\n", __func__); | ||
3955 | } | ||
3956 | |||
3957 | return rval; | ||
3958 | } | ||
3959 | |||
3960 | int | ||
3817 | qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, | 3961 | qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, |
3818 | uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) | 3962 | uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) |
3819 | { | 3963 | { |
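
qla2x00_set_driver_version() builds a small DMA buffer for the MBC_SET_RNID_PARAMS / RNID_TYPE_SET_VERSION command: a 4-byte header (copied verbatim in the hunk above, its first byte being the total payload length in dwords, 7 here) followed by the driver version string, zero-filled and truncated to the remaining 24 bytes. A sketch of just that payload layout in ordinary memory; the helper name is invented and the header bytes are taken as-is from the hunk:

    #include <string.h>

    #define VER_HDR "\x7\x3\x11\x0"   /* 4-byte header used in the hunk above */

    /* Fill 'buf' (at least 28 bytes) and return the dword count the
     * mailbox command advertises. */
    static unsigned int build_version_payload(char *buf, const char *version)
    {
        unsigned int dwlen, len;

        memcpy(buf, VER_HDR, 4);
        dwlen = (unsigned char)buf[0];      /* total length in dwords: 7 */
        len = dwlen * 4 - 4;                /* 24 bytes left for the string */
        memset(buf + 4, 0, len);            /* zero-fill the string area */
        if (len > strlen(version))
            len = strlen(version);
        memcpy(buf + 4, version, len);      /* truncate if the string is longer */
        return dwlen;
    }
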
@@ -4415,38 +4559,45 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, | |||
4415 | } | 4559 | } |
4416 | 4560 | ||
4417 | int | 4561 | int |
4418 | qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) | 4562 | qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) |
4419 | { | 4563 | { |
4420 | int rval; | 4564 | int rval = QLA_FUNCTION_FAILED; |
4421 | uint8_t byte; | ||
4422 | struct qla_hw_data *ha = vha->hw; | 4565 | struct qla_hw_data *ha = vha->hw; |
4566 | uint8_t byte; | ||
4423 | 4567 | ||
4424 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca, | 4568 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca, |
4425 | "Entered %s.\n", __func__); | 4569 | "Entered %s.\n", __func__); |
4426 | 4570 | ||
4427 | /* Integer part */ | 4571 | if (ha->thermal_support & THERMAL_SUPPORT_I2C) { |
4428 | rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, | 4572 | rval = qla2x00_read_sfp(vha, 0, &byte, |
4429 | BIT_13|BIT_12|BIT_0); | 4573 | 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0); |
4430 | if (rval != QLA_SUCCESS) { | 4574 | *temp = byte; |
4431 | ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval); | 4575 | if (rval == QLA_SUCCESS) |
4432 | ha->flags.thermal_supported = 0; | 4576 | goto done; |
4433 | goto fail; | 4577 | |
4578 | ql_log(ql_log_warn, vha, 0x10c9, | ||
4579 | "Thermal not supported by I2C.\n"); | ||
4580 | ha->thermal_support &= ~THERMAL_SUPPORT_I2C; | ||
4434 | } | 4581 | } |
4435 | *temp = byte; | ||
4436 | 4582 | ||
4437 | /* Fraction part */ | 4583 | if (ha->thermal_support & THERMAL_SUPPORT_ISP) { |
4438 | rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, | 4584 | rval = qla2x00_read_asic_temperature(vha, temp); |
4439 | BIT_13|BIT_12|BIT_0); | 4585 | if (rval == QLA_SUCCESS) |
4440 | if (rval != QLA_SUCCESS) { | 4586 | goto done; |
4441 | ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval); | 4587 | |
4442 | ha->flags.thermal_supported = 0; | 4588 | ql_log(ql_log_warn, vha, 0x1019, |
4443 | goto fail; | 4589 | "Thermal not supported by ISP.\n"); |
4590 | ha->thermal_support &= ~THERMAL_SUPPORT_ISP; | ||
4444 | } | 4591 | } |
4445 | *frac = (byte >> 6) * 25; | ||
4446 | 4592 | ||
4593 | ql_log(ql_log_warn, vha, 0x1150, | ||
4594 | "Thermal not supported by this card " | ||
4595 | "(ignoring further requests).\n"); | ||
4596 | return rval; | ||
4597 | |||
4598 | done: | ||
4447 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018, | 4599 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018, |
4448 | "Done %s.\n", __func__); | 4600 | "Done %s.\n", __func__); |
4449 | fail: | ||
4450 | return rval; | 4601 | return rval; |
4451 | } | 4602 | } |
4452 | 4603 | ||
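
The rewritten qla2x00_get_thermal_temp() keeps a per-adapter capability bitmask: it tries the I2C sensor first, clears THERMAL_SUPPORT_I2C on failure, then tries the ISP/firmware query, clears THERMAL_SUPPORT_ISP on failure, and once both bits are gone it logs that further requests will be ignored. A condensed, self-contained version of that fallback chain with stubbed readers and invented bit names:

    #include <stdio.h>

    #define TEMP_SRC_I2C 0x1   /* stand-in for THERMAL_SUPPORT_I2C */
    #define TEMP_SRC_ISP 0x2   /* stand-in for THERMAL_SUPPORT_ISP */

    static int read_i2c_temp(int *t) { (void)t; return -1; }   /* stub: I2C fails */
    static int read_isp_temp(int *t) { *t = 47; return 0; }    /* stub: ISP works */

    /* Try each still-supported source once; drop a source that fails so
     * later calls stop retrying it. */
    static int get_temp(unsigned int *support, int *temp)
    {
        if (*support & TEMP_SRC_I2C) {
            if (read_i2c_temp(temp) == 0)
                return 0;
            *support &= ~TEMP_SRC_I2C;
        }
        if (*support & TEMP_SRC_ISP) {
            if (read_isp_temp(temp) == 0)
                return 0;
            *support &= ~TEMP_SRC_ISP;
        }
        return -1;   /* nothing left to try; caller stops querying */
    }

    int main(void)
    {
        unsigned int support = TEMP_SRC_I2C | TEMP_SRC_ISP;
        int t;

        if (get_temp(&support, &t) == 0)
            printf("temperature: %d C\n", t);
        return 0;
    }
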
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 20fd974f903a..f868a9f98afe 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -523,6 +523,7 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
523 | clear_bit(que_id, ha->req_qid_map); | 523 | clear_bit(que_id, ha->req_qid_map); |
524 | mutex_unlock(&ha->vport_lock); | 524 | mutex_unlock(&ha->vport_lock); |
525 | } | 525 | } |
526 | kfree(req->outstanding_cmds); | ||
526 | kfree(req); | 527 | kfree(req); |
527 | req = NULL; | 528 | req = NULL; |
528 | } | 529 | } |
@@ -649,6 +650,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, | |||
649 | goto que_failed; | 650 | goto que_failed; |
650 | } | 651 | } |
651 | 652 | ||
653 | ret = qla2x00_alloc_outstanding_cmds(ha, req); | ||
654 | if (ret != QLA_SUCCESS) | ||
655 | goto que_failed; | ||
656 | |||
652 | mutex_lock(&ha->vport_lock); | 657 | mutex_lock(&ha->vport_lock); |
653 | que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); | 658 | que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); |
654 | if (que_id >= ha->max_req_queues) { | 659 | if (que_id >= ha->max_req_queues) { |
@@ -685,7 +690,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, | |||
685 | "options=0x%x.\n", req->options); | 690 | "options=0x%x.\n", req->options); |
686 | ql_dbg(ql_dbg_init, base_vha, 0x00dd, | 691 | ql_dbg(ql_dbg_init, base_vha, 0x00dd, |
687 | "options=0x%x.\n", req->options); | 692 | "options=0x%x.\n", req->options); |
688 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) | 693 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) |
689 | req->outstanding_cmds[cnt] = NULL; | 694 | req->outstanding_cmds[cnt] = NULL; |
690 | req->current_outstanding_cmd = 1; | 695 | req->current_outstanding_cmd = 1; |
691 | 696 | ||
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 3e3f593bada3..10754f518303 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -847,14 +847,21 @@ static int | |||
847 | qla82xx_rom_lock(struct qla_hw_data *ha) | 847 | qla82xx_rom_lock(struct qla_hw_data *ha) |
848 | { | 848 | { |
849 | int done = 0, timeout = 0; | 849 | int done = 0, timeout = 0; |
850 | uint32_t lock_owner = 0; | ||
851 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | ||
850 | 852 | ||
851 | while (!done) { | 853 | while (!done) { |
852 | /* acquire semaphore2 from PCI HW block */ | 854 | /* acquire semaphore2 from PCI HW block */ |
853 | done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); | 855 | done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); |
854 | if (done == 1) | 856 | if (done == 1) |
855 | break; | 857 | break; |
856 | if (timeout >= qla82xx_rom_lock_timeout) | 858 | if (timeout >= qla82xx_rom_lock_timeout) { |
859 | lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); | ||
860 | ql_dbg(ql_dbg_p3p, vha, 0xb085, | ||
861 | "Failed to acquire rom lock, acquired by %d.\n", | ||
862 | lock_owner); | ||
857 | return -1; | 863 | return -1; |
864 | } | ||
858 | timeout++; | 865 | timeout++; |
859 | } | 866 | } |
860 | qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); | 867 | qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); |
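
The qla82xx_rom_lock() change is purely about diagnosability: when the bounded polling loop for the PCIe SEM2 hardware semaphore times out, the new code reads QLA82XX_ROM_LOCK_ID to report which function currently owns the lock before giving up. A generic poll-with-owner-report sketch with stubbed register accessors (names invented):

    #include <stdio.h>

    #define ROM_LOCK_TIMEOUT 100

    static int try_acquire_sem2(void)   { return 0; }   /* stub: 0 = not granted */
    static int read_lock_owner_id(void) { return 2; }   /* stub: current owner */

    /* Poll the semaphore up to ROM_LOCK_TIMEOUT times; on timeout report
     * the current owner so the failure is attributable, then give up. */
    static int rom_lock(void)
    {
        int timeout = 0;

        for (;;) {
            if (try_acquire_sem2() == 1)
                return 0;                    /* acquired */
            if (timeout++ >= ROM_LOCK_TIMEOUT) {
                printf("rom lock timeout, held by %d\n", read_lock_owner_id());
                return -1;
            }
        }
    }
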
@@ -3629,7 +3636,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) | |||
3629 | req = ha->req_q_map[que]; | 3636 | req = ha->req_q_map[que]; |
3630 | if (!req) | 3637 | if (!req) |
3631 | continue; | 3638 | continue; |
3632 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | 3639 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { |
3633 | sp = req->outstanding_cmds[cnt]; | 3640 | sp = req->outstanding_cmds[cnt]; |
3634 | if (sp) { | 3641 | if (sp) { |
3635 | if (!sp->u.scmd.ctx || | 3642 | if (!sp->u.scmd.ctx || |
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 6c953e8c08f0..d268e8406fdb 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -897,7 +897,7 @@ struct ct6_dsd { | |||
897 | #define FLT_REG_BOOT_CODE_82XX 0x78 | 897 | #define FLT_REG_BOOT_CODE_82XX 0x78 |
898 | #define FLT_REG_FW_82XX 0x74 | 898 | #define FLT_REG_FW_82XX 0x74 |
899 | #define FLT_REG_GOLD_FW_82XX 0x75 | 899 | #define FLT_REG_GOLD_FW_82XX 0x75 |
900 | #define FLT_REG_VPD_82XX 0x81 | 900 | #define FLT_REG_VPD_8XXX 0x81 |
901 | 901 | ||
902 | #define FA_VPD_SIZE_82XX 0x400 | 902 | #define FA_VPD_SIZE_82XX 0x400 |
903 | 903 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 10d23f8b7036..2c6dd3dfe0f4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -111,8 +111,7 @@ MODULE_PARM_DESC(ql2xfdmienable, | |||
111 | "Enables FDMI registrations. " | 111 | "Enables FDMI registrations. " |
112 | "0 - no FDMI. Default is 1 - perform FDMI."); | 112 | "0 - no FDMI. Default is 1 - perform FDMI."); |
113 | 113 | ||
114 | #define MAX_Q_DEPTH 32 | 114 | int ql2xmaxqdepth = MAX_Q_DEPTH; |
115 | static int ql2xmaxqdepth = MAX_Q_DEPTH; | ||
116 | module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); | 115 | module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); |
117 | MODULE_PARM_DESC(ql2xmaxqdepth, | 116 | MODULE_PARM_DESC(ql2xmaxqdepth, |
118 | "Maximum queue depth to set for each LUN. " | 117 | "Maximum queue depth to set for each LUN. " |
@@ -360,6 +359,9 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) | |||
360 | (req->length + 1) * sizeof(request_t), | 359 | (req->length + 1) * sizeof(request_t), |
361 | req->ring, req->dma); | 360 | req->ring, req->dma); |
362 | 361 | ||
362 | if (req) | ||
363 | kfree(req->outstanding_cmds); | ||
364 | |||
363 | kfree(req); | 365 | kfree(req); |
364 | req = NULL; | 366 | req = NULL; |
365 | } | 367 | } |
@@ -628,7 +630,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr) | |||
628 | } | 630 | } |
629 | 631 | ||
630 | CMD_SP(cmd) = NULL; | 632 | CMD_SP(cmd) = NULL; |
631 | mempool_free(sp, ha->srb_mempool); | 633 | qla2x00_rel_sp(sp->fcport->vha, sp); |
632 | } | 634 | } |
633 | 635 | ||
634 | static void | 636 | static void |
@@ -716,9 +718,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |||
716 | goto qc24_target_busy; | 718 | goto qc24_target_busy; |
717 | } | 719 | } |
718 | 720 | ||
719 | sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC); | 721 | sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); |
720 | if (!sp) | 722 | if (!sp) { |
723 | set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags); | ||
721 | goto qc24_host_busy; | 724 | goto qc24_host_busy; |
725 | } | ||
722 | 726 | ||
723 | sp->u.scmd.cmd = cmd; | 727 | sp->u.scmd.cmd = cmd; |
724 | sp->type = SRB_SCSI_CMD; | 728 | sp->type = SRB_SCSI_CMD; |
@@ -731,6 +735,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |||
731 | if (rval != QLA_SUCCESS) { | 735 | if (rval != QLA_SUCCESS) { |
732 | ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, | 736 | ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, |
733 | "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); | 737 | "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); |
738 | set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags); | ||
734 | goto qc24_host_busy_free_sp; | 739 | goto qc24_host_busy_free_sp; |
735 | } | 740 | } |
736 | 741 | ||
@@ -1010,7 +1015,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, | |||
1010 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1015 | spin_lock_irqsave(&ha->hardware_lock, flags); |
1011 | req = vha->req; | 1016 | req = vha->req; |
1012 | for (cnt = 1; status == QLA_SUCCESS && | 1017 | for (cnt = 1; status == QLA_SUCCESS && |
1013 | cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | 1018 | cnt < req->num_outstanding_cmds; cnt++) { |
1014 | sp = req->outstanding_cmds[cnt]; | 1019 | sp = req->outstanding_cmds[cnt]; |
1015 | if (!sp) | 1020 | if (!sp) |
1016 | continue; | 1021 | continue; |
@@ -1300,14 +1305,14 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) | |||
1300 | } | 1305 | } |
1301 | 1306 | ||
1302 | if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { | 1307 | if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { |
1308 | atomic_set(&vha->loop_state, LOOP_DOWN); | ||
1309 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); | ||
1310 | qla2x00_mark_all_devices_lost(vha, 0); | ||
1303 | ret = qla2x00_full_login_lip(vha); | 1311 | ret = qla2x00_full_login_lip(vha); |
1304 | if (ret != QLA_SUCCESS) { | 1312 | if (ret != QLA_SUCCESS) { |
1305 | ql_dbg(ql_dbg_taskm, vha, 0x802d, | 1313 | ql_dbg(ql_dbg_taskm, vha, 0x802d, |
1306 | "full_login_lip=%d.\n", ret); | 1314 | "full_login_lip=%d.\n", ret); |
1307 | } | 1315 | } |
1308 | atomic_set(&vha->loop_state, LOOP_DOWN); | ||
1309 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); | ||
1310 | qla2x00_mark_all_devices_lost(vha, 0); | ||
1311 | } | 1316 | } |
1312 | 1317 | ||
1313 | if (ha->flags.enable_lip_reset) { | 1318 | if (ha->flags.enable_lip_reset) { |
@@ -1337,7 +1342,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1337 | req = ha->req_q_map[que]; | 1342 | req = ha->req_q_map[que]; |
1338 | if (!req) | 1343 | if (!req) |
1339 | continue; | 1344 | continue; |
1340 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | 1345 | if (!req->outstanding_cmds) |
1346 | continue; | ||
1347 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { | ||
1341 | sp = req->outstanding_cmds[cnt]; | 1348 | sp = req->outstanding_cmds[cnt]; |
1342 | if (sp) { | 1349 | if (sp) { |
1343 | req->outstanding_cmds[cnt] = NULL; | 1350 | req->outstanding_cmds[cnt] = NULL; |
@@ -1453,6 +1460,81 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type) | |||
1453 | return tag_type; | 1460 | return tag_type; |
1454 | } | 1461 | } |
1455 | 1462 | ||
1463 | static void | ||
1464 | qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha) | ||
1465 | { | ||
1466 | scsi_qla_host_t *vp; | ||
1467 | struct Scsi_Host *shost; | ||
1468 | struct scsi_device *sdev; | ||
1469 | struct qla_hw_data *ha = vha->hw; | ||
1470 | unsigned long flags; | ||
1471 | |||
1472 | ha->host_last_rampdown_time = jiffies; | ||
1473 | |||
1474 | if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun) | ||
1475 | return; | ||
1476 | |||
1477 | if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun) | ||
1478 | ha->cfg_lun_q_depth = vha->host->cmd_per_lun; | ||
1479 | else | ||
1480 | ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2; | ||
1481 | |||
1482 | /* | ||
1483 | * Geometrically ramp down the queue depth for all devices on this | ||
1484 | * adapter | ||
1485 | */ | ||
1486 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
1487 | list_for_each_entry(vp, &ha->vp_list, list) { | ||
1488 | shost = vp->host; | ||
1489 | shost_for_each_device(sdev, shost) { | ||
1490 | if (sdev->queue_depth > shost->cmd_per_lun) { | ||
1491 | if (sdev->queue_depth < ha->cfg_lun_q_depth) | ||
1492 | continue; | ||
1493 | ql_log(ql_log_warn, vp, 0x3031, | ||
1494 | "%ld:%d:%d: Ramping down queue depth to %d", | ||
1495 | vp->host_no, sdev->id, sdev->lun, | ||
1496 | ha->cfg_lun_q_depth); | ||
1497 | qla2x00_change_queue_depth(sdev, | ||
1498 | ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT); | ||
1499 | } | ||
1500 | } | ||
1501 | } | ||
1502 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
1503 | |||
1504 | return; | ||
1505 | } | ||
1506 | |||
1507 | static void | ||
1508 | qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha) | ||
1509 | { | ||
1510 | scsi_qla_host_t *vp; | ||
1511 | struct Scsi_Host *shost; | ||
1512 | struct scsi_device *sdev; | ||
1513 | struct qla_hw_data *ha = vha->hw; | ||
1514 | unsigned long flags; | ||
1515 | |||
1516 | ha->host_last_rampup_time = jiffies; | ||
1517 | ha->cfg_lun_q_depth++; | ||
1518 | |||
1519 | /* | ||
1520 | * Linearly ramp up the queue depth for all devices on this | ||
1521 | * adapter | ||
1522 | */ | ||
1523 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
1524 | list_for_each_entry(vp, &ha->vp_list, list) { | ||
1525 | shost = vp->host; | ||
1526 | shost_for_each_device(sdev, shost) { | ||
1527 | if (sdev->queue_depth > ha->cfg_lun_q_depth) | ||
1528 | continue; | ||
1529 | qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth, | ||
1530 | SCSI_QDEPTH_RAMP_UP); | ||
1531 | } | ||
1532 | } | ||
1533 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
1534 | |||
1535 | return; | ||
1536 | } | ||
1537 | |||
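Note on the two helpers added above: HOST_RAMP_DOWN_QUEUE_DEPTH is set in qla2xxx_queuecommand() when an srb cannot be allocated or the command cannot be started, and both ramp flags are serviced in qla2x00_do_dpc() further down, with ramp-down taking precedence. The policy halves ha->cfg_lun_q_depth on congestion (never below the host's cmd_per_lun) and raises it by one step otherwise. A minimal stand-alone sketch of that policy, using hypothetical names and omitting the per-device iteration and locking, might look like:

    /* Illustrative sketch of the qla2xxx queue-depth ramp policy only.
     * 'cfg' stands in for ha->cfg_lun_q_depth and 'cmd_per_lun' for
     * shost->cmd_per_lun; this is not part of the patch itself. */
    static int ramp_down(int cfg, int cmd_per_lun)
    {
            if (cfg <= cmd_per_lun)
                    return cfg;             /* already at the floor */
            if (cfg / 2 < cmd_per_lun)
                    return cmd_per_lun;     /* clamp to the per-LUN minimum */
            return cfg / 2;                 /* geometric back-off */
    }

    static int ramp_up(int cfg)
    {
            return cfg + 1;                 /* linear recovery, one step per DPC pass */
    }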
1456 | /** | 1538 | /** |
1457 | * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. | 1539 | * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. |
1458 | * @ha: HA context | 1540 | * @ha: HA context |
@@ -1730,6 +1812,9 @@ qla83xx_iospace_config(struct qla_hw_data *ha) | |||
1730 | 1812 | ||
1731 | mqiobase_exit: | 1813 | mqiobase_exit: |
1732 | ha->msix_count = ha->max_rsp_queues + 1; | 1814 | ha->msix_count = ha->max_rsp_queues + 1; |
1815 | |||
1816 | qlt_83xx_iospace_config(ha); | ||
1817 | |||
1733 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, | 1818 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, |
1734 | "MSIX Count:%d.\n", ha->msix_count); | 1819 | "MSIX Count:%d.\n", ha->msix_count); |
1735 | return 0; | 1820 | return 0; |
@@ -2230,6 +2315,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2230 | ha->init_cb_size = sizeof(init_cb_t); | 2315 | ha->init_cb_size = sizeof(init_cb_t); |
2231 | ha->link_data_rate = PORT_SPEED_UNKNOWN; | 2316 | ha->link_data_rate = PORT_SPEED_UNKNOWN; |
2232 | ha->optrom_size = OPTROM_SIZE_2300; | 2317 | ha->optrom_size = OPTROM_SIZE_2300; |
2318 | ha->cfg_lun_q_depth = ql2xmaxqdepth; | ||
2233 | 2319 | ||
2234 | /* Assign ISP specific operations. */ | 2320 | /* Assign ISP specific operations. */ |
2235 | if (IS_QLA2100(ha)) { | 2321 | if (IS_QLA2100(ha)) { |
@@ -2307,6 +2393,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2307 | ha->mbx_count = MAILBOX_REGISTER_COUNT; | 2393 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
2308 | req_length = REQUEST_ENTRY_CNT_24XX; | 2394 | req_length = REQUEST_ENTRY_CNT_24XX; |
2309 | rsp_length = RESPONSE_ENTRY_CNT_2300; | 2395 | rsp_length = RESPONSE_ENTRY_CNT_2300; |
2396 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; | ||
2310 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; | 2397 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
2311 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); | 2398 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); |
2312 | ha->gid_list_info_size = 8; | 2399 | ha->gid_list_info_size = 8; |
@@ -2338,6 +2425,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2338 | ha->mbx_count = MAILBOX_REGISTER_COUNT; | 2425 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
2339 | req_length = REQUEST_ENTRY_CNT_24XX; | 2426 | req_length = REQUEST_ENTRY_CNT_24XX; |
2340 | rsp_length = RESPONSE_ENTRY_CNT_2300; | 2427 | rsp_length = RESPONSE_ENTRY_CNT_2300; |
2428 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; | ||
2341 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; | 2429 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
2342 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); | 2430 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); |
2343 | ha->gid_list_info_size = 8; | 2431 | ha->gid_list_info_size = 8; |
@@ -2377,6 +2465,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2377 | complete(&ha->mbx_cmd_comp); | 2465 | complete(&ha->mbx_cmd_comp); |
2378 | init_completion(&ha->mbx_intr_comp); | 2466 | init_completion(&ha->mbx_intr_comp); |
2379 | init_completion(&ha->dcbx_comp); | 2467 | init_completion(&ha->dcbx_comp); |
2468 | init_completion(&ha->lb_portup_comp); | ||
2380 | 2469 | ||
2381 | set_bit(0, (unsigned long *) ha->vp_idx_map); | 2470 | set_bit(0, (unsigned long *) ha->vp_idx_map); |
2382 | 2471 | ||
@@ -2720,6 +2809,9 @@ qla2x00_shutdown(struct pci_dev *pdev) | |||
2720 | scsi_qla_host_t *vha; | 2809 | scsi_qla_host_t *vha; |
2721 | struct qla_hw_data *ha; | 2810 | struct qla_hw_data *ha; |
2722 | 2811 | ||
2812 | if (!atomic_read(&pdev->enable_cnt)) | ||
2813 | return; | ||
2814 | |||
2723 | vha = pci_get_drvdata(pdev); | 2815 | vha = pci_get_drvdata(pdev); |
2724 | ha = vha->hw; | 2816 | ha = vha->hw; |
2725 | 2817 | ||
@@ -3974,6 +4066,8 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) | |||
3974 | uint32_t idc_lck_rcvry_stage_mask = 0x3; | 4066 | uint32_t idc_lck_rcvry_stage_mask = 0x3; |
3975 | uint32_t idc_lck_rcvry_owner_mask = 0x3c; | 4067 | uint32_t idc_lck_rcvry_owner_mask = 0x3c; |
3976 | struct qla_hw_data *ha = base_vha->hw; | 4068 | struct qla_hw_data *ha = base_vha->hw; |
4069 | ql_dbg(ql_dbg_p3p, base_vha, 0xb086, | ||
4070 | "Trying force recovery of the IDC lock.\n"); | ||
3977 | 4071 | ||
3978 | rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); | 4072 | rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); |
3979 | if (rval) | 4073 | if (rval) |
@@ -4065,6 +4159,7 @@ qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) | |||
4065 | { | 4159 | { |
4066 | uint16_t options = (requester_id << 15) | BIT_6; | 4160 | uint16_t options = (requester_id << 15) | BIT_6; |
4067 | uint32_t data; | 4161 | uint32_t data; |
4162 | uint32_t lock_owner; | ||
4068 | struct qla_hw_data *ha = base_vha->hw; | 4163 | struct qla_hw_data *ha = base_vha->hw; |
4069 | 4164 | ||
4070 | /* IDC-lock implementation using driver-lock/lock-id remote registers */ | 4165 | /* IDC-lock implementation using driver-lock/lock-id remote registers */ |
@@ -4076,8 +4171,11 @@ retry_lock: | |||
4076 | qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, | 4171 | qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, |
4077 | ha->portnum); | 4172 | ha->portnum); |
4078 | } else { | 4173 | } else { |
4174 | qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, | ||
4175 | &lock_owner); | ||
4079 | ql_dbg(ql_dbg_p3p, base_vha, 0xb063, | 4176 | ql_dbg(ql_dbg_p3p, base_vha, 0xb063, |
4080 | "Failed to acquire IDC lock. retrying...\n"); | 4177 | "Failed to acquire IDC lock, acquired by %d, " |
4178 | "retrying...\n", lock_owner); | ||
4081 | 4179 | ||
4082 | /* Retry/Perform IDC-Lock recovery */ | 4180 | /* Retry/Perform IDC-Lock recovery */ |
4083 | if (qla83xx_idc_lock_recovery(base_vha) | 4181 | if (qla83xx_idc_lock_recovery(base_vha) |
@@ -4605,6 +4703,18 @@ qla2x00_do_dpc(void *data) | |||
4605 | qla2xxx_flash_npiv_conf(base_vha); | 4703 | qla2xxx_flash_npiv_conf(base_vha); |
4606 | } | 4704 | } |
4607 | 4705 | ||
4706 | if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, | ||
4707 | &base_vha->dpc_flags)) { | ||
4708 | /* Prevents simultaneous ramp up and down */ | ||
4709 | clear_bit(HOST_RAMP_UP_QUEUE_DEPTH, | ||
4710 | &base_vha->dpc_flags); | ||
4711 | qla2x00_host_ramp_down_queuedepth(base_vha); | ||
4712 | } | ||
4713 | |||
4714 | if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH, | ||
4715 | &base_vha->dpc_flags)) | ||
4716 | qla2x00_host_ramp_up_queuedepth(base_vha); | ||
4717 | |||
4608 | if (!ha->interrupts_on) | 4718 | if (!ha->interrupts_on) |
4609 | ha->isp_ops->enable_intrs(ha); | 4719 | ha->isp_ops->enable_intrs(ha); |
4610 | 4720 | ||
@@ -4733,7 +4843,7 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
4733 | cpu_flags); | 4843 | cpu_flags); |
4734 | req = ha->req_q_map[0]; | 4844 | req = ha->req_q_map[0]; |
4735 | for (index = 1; | 4845 | for (index = 1; |
4736 | index < MAX_OUTSTANDING_COMMANDS; | 4846 | index < req->num_outstanding_cmds; |
4737 | index++) { | 4847 | index++) { |
4738 | fc_port_t *sfcp; | 4848 | fc_port_t *sfcp; |
4739 | 4849 | ||
@@ -4802,7 +4912,9 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
4802 | test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || | 4912 | test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || |
4803 | test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || | 4913 | test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || |
4804 | test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || | 4914 | test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || |
4805 | test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) { | 4915 | test_bit(RELOGIN_NEEDED, &vha->dpc_flags) || |
4916 | test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) || | ||
4917 | test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) { | ||
4806 | ql_dbg(ql_dbg_timer, vha, 0x600b, | 4918 | ql_dbg(ql_dbg_timer, vha, 0x600b, |
4807 | "isp_abort_needed=%d loop_resync_needed=%d " | 4919 | "isp_abort_needed=%d loop_resync_needed=%d " |
4808 | "fcport_update_needed=%d start_dpc=%d " | 4920 | "fcport_update_needed=%d start_dpc=%d " |
@@ -4815,12 +4927,15 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
4815 | ql_dbg(ql_dbg_timer, vha, 0x600c, | 4927 | ql_dbg(ql_dbg_timer, vha, 0x600c, |
4816 | "beacon_blink_needed=%d isp_unrecoverable=%d " | 4928 | "beacon_blink_needed=%d isp_unrecoverable=%d " |
4817 | "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " | 4929 | "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " |
4818 | "relogin_needed=%d.\n", | 4930 | "relogin_needed=%d, host_ramp_down_needed=%d " |
4931 | "host_ramp_up_needed=%d.\n", | ||
4819 | test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), | 4932 | test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), |
4820 | test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), | 4933 | test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), |
4821 | test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), | 4934 | test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), |
4822 | test_bit(VP_DPC_NEEDED, &vha->dpc_flags), | 4935 | test_bit(VP_DPC_NEEDED, &vha->dpc_flags), |
4823 | test_bit(RELOGIN_NEEDED, &vha->dpc_flags)); | 4936 | test_bit(RELOGIN_NEEDED, &vha->dpc_flags), |
4937 | test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags), | ||
4938 | test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags)); | ||
4824 | qla2xxx_wake_dpc(vha); | 4939 | qla2xxx_wake_dpc(vha); |
4825 | } | 4940 | } |
4826 | 4941 | ||
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h index 892a81e457bc..46ef0ac48f44 100644 --- a/drivers/scsi/qla2xxx/qla_settings.h +++ b/drivers/scsi/qla2xxx/qla_settings.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 32fdc2a66dd1..3bef6736d885 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -798,20 +798,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) | |||
798 | case FLT_REG_BOOTLOAD_82XX: | 798 | case FLT_REG_BOOTLOAD_82XX: |
799 | ha->flt_region_bootload = start; | 799 | ha->flt_region_bootload = start; |
800 | break; | 800 | break; |
801 | case FLT_REG_VPD_82XX: | 801 | case FLT_REG_VPD_8XXX: |
802 | ha->flt_region_vpd = start; | 802 | if (IS_CNA_CAPABLE(ha)) |
803 | break; | ||
804 | case FLT_REG_FCOE_VPD_0: | ||
805 | if (!IS_QLA8031(ha)) | ||
806 | break; | ||
807 | ha->flt_region_vpd_nvram = start; | ||
808 | if (ha->flags.port0) | ||
809 | ha->flt_region_vpd = start; | ||
810 | break; | ||
811 | case FLT_REG_FCOE_VPD_1: | ||
812 | if (!IS_QLA8031(ha)) | ||
813 | break; | ||
814 | if (!ha->flags.port0) | ||
815 | ha->flt_region_vpd = start; | 803 | ha->flt_region_vpd = start; |
816 | break; | 804 | break; |
817 | case FLT_REG_FCOE_NVRAM_0: | 805 | case FLT_REG_FCOE_NVRAM_0: |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 80f4b849e2b0..61b5d8c2b5da 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(qlini_mode, | |||
52 | "\"disabled\" - initiator mode will never be enabled; " | 52 | "\"disabled\" - initiator mode will never be enabled; " |
53 | "\"enabled\" (default) - initiator mode will always stay enabled."); | 53 | "\"enabled\" (default) - initiator mode will always stay enabled."); |
54 | 54 | ||
55 | static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; | 55 | int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * From scsi/fc/fc_fcp.h | 58 | * From scsi/fc/fc_fcp.h |
@@ -1119,6 +1119,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, | |||
1119 | nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; | 1119 | nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; |
1120 | nack->u.isp24.status = ntfy->u.isp24.status; | 1120 | nack->u.isp24.status = ntfy->u.isp24.status; |
1121 | nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; | 1121 | nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; |
1122 | nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; | ||
1122 | nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; | 1123 | nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; |
1123 | nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; | 1124 | nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; |
1124 | nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; | 1125 | nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; |
@@ -1570,7 +1571,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha) | |||
1570 | /* always increment cmd handle */ | 1571 | /* always increment cmd handle */ |
1571 | do { | 1572 | do { |
1572 | ++h; | 1573 | ++h; |
1573 | if (h > MAX_OUTSTANDING_COMMANDS) | 1574 | if (h > DEFAULT_OUTSTANDING_COMMANDS) |
1574 | h = 1; /* 0 is QLA_TGT_NULL_HANDLE */ | 1575 | h = 1; /* 0 is QLA_TGT_NULL_HANDLE */ |
1575 | if (h == ha->tgt.current_handle) { | 1576 | if (h == ha->tgt.current_handle) { |
1576 | ql_dbg(ql_dbg_tgt, vha, 0xe04e, | 1577 | ql_dbg(ql_dbg_tgt, vha, 0xe04e, |
@@ -2441,7 +2442,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, | |||
2441 | return NULL; | 2442 | return NULL; |
2442 | } | 2443 | } |
2443 | /* handle-1 is actually used */ | 2444 | /* handle-1 is actually used */ |
2444 | if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) { | 2445 | if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { |
2445 | ql_dbg(ql_dbg_tgt, vha, 0xe052, | 2446 | ql_dbg(ql_dbg_tgt, vha, 0xe052, |
2446 | "qla_target(%d): Wrong handle %x received\n", | 2447 | "qla_target(%d): Wrong handle %x received\n", |
2447 | vha->vp_idx, handle); | 2448 | vha->vp_idx, handle); |
@@ -4305,6 +4306,12 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) | |||
4305 | if (!QLA_TGT_MODE_ENABLED()) | 4306 | if (!QLA_TGT_MODE_ENABLED()) |
4306 | return 0; | 4307 | return 0; |
4307 | 4308 | ||
4309 | if (!IS_TGT_MODE_CAPABLE(ha)) { | ||
4310 | ql_log(ql_log_warn, base_vha, 0xe070, | ||
4311 | "This adapter does not support target mode.\n"); | ||
4312 | return 0; | ||
4313 | } | ||
4314 | |||
4308 | ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, | 4315 | ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, |
4309 | "Registering target for host %ld(%p)", base_vha->host_no, ha); | 4316 | "Registering target for host %ld(%p)", base_vha->host_no, ha); |
4310 | 4317 | ||
@@ -4666,7 +4673,6 @@ void | |||
4666 | qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) | 4673 | qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) |
4667 | { | 4674 | { |
4668 | struct qla_hw_data *ha = vha->hw; | 4675 | struct qla_hw_data *ha = vha->hw; |
4669 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | ||
4670 | struct atio_from_isp *pkt; | 4676 | struct atio_from_isp *pkt; |
4671 | int cnt, i; | 4677 | int cnt, i; |
4672 | 4678 | ||
@@ -4694,26 +4700,28 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) | |||
4694 | } | 4700 | } |
4695 | 4701 | ||
4696 | /* Adjust ring index */ | 4702 | /* Adjust ring index */ |
4697 | WRT_REG_DWORD(®->atio_q_out, ha->tgt.atio_ring_index); | 4703 | WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); |
4698 | } | 4704 | } |
4699 | 4705 | ||
4700 | void | 4706 | void |
4701 | qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg) | 4707 | qlt_24xx_config_rings(struct scsi_qla_host *vha) |
4702 | { | 4708 | { |
4703 | struct qla_hw_data *ha = vha->hw; | 4709 | struct qla_hw_data *ha = vha->hw; |
4710 | if (!QLA_TGT_MODE_ENABLED()) | ||
4711 | return; | ||
4704 | 4712 | ||
4705 | /* FIXME: atio_q in/out for ha->mqenable=1..? */ | 4713 | WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); |
4706 | if (ha->mqenable) { | 4714 | WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); |
4707 | #if 0 | 4715 | RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); |
4708 | WRT_REG_DWORD(®->isp25mq.atio_q_in, 0); | 4716 | |
4709 | WRT_REG_DWORD(®->isp25mq.atio_q_out, 0); | 4717 | if (IS_ATIO_MSIX_CAPABLE(ha)) { |
4710 | RD_REG_DWORD(®->isp25mq.atio_q_out); | 4718 | struct qla_msix_entry *msix = &ha->msix_entries[2]; |
4711 | #endif | 4719 | struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; |
4712 | } else { | 4720 | |
4713 | /* Setup APTIO registers for target mode */ | 4721 | icb->msix_atio = cpu_to_le16(msix->entry); |
4714 | WRT_REG_DWORD(®->isp24.atio_q_in, 0); | 4722 | ql_dbg(ql_dbg_init, vha, 0xf072, |
4715 | WRT_REG_DWORD(®->isp24.atio_q_out, 0); | 4723 | "Registering ICB vector 0x%x for atio que.\n", |
4716 | RD_REG_DWORD(®->isp24.atio_q_out); | 4724 | msix->entry); |
4717 | } | 4725 | } |
4718 | } | 4726 | } |
4719 | 4727 | ||
@@ -4796,6 +4804,101 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, | |||
4796 | } | 4804 | } |
4797 | } | 4805 | } |
4798 | 4806 | ||
4807 | void | ||
4808 | qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) | ||
4809 | { | ||
4810 | struct qla_hw_data *ha = vha->hw; | ||
4811 | |||
4812 | if (!QLA_TGT_MODE_ENABLED()) | ||
4813 | return; | ||
4814 | |||
4815 | if (qla_tgt_mode_enabled(vha)) { | ||
4816 | if (!ha->tgt.saved_set) { | ||
4817 | /* We save only once */ | ||
4818 | ha->tgt.saved_exchange_count = nv->exchange_count; | ||
4819 | ha->tgt.saved_firmware_options_1 = | ||
4820 | nv->firmware_options_1; | ||
4821 | ha->tgt.saved_firmware_options_2 = | ||
4822 | nv->firmware_options_2; | ||
4823 | ha->tgt.saved_firmware_options_3 = | ||
4824 | nv->firmware_options_3; | ||
4825 | ha->tgt.saved_set = 1; | ||
4826 | } | ||
4827 | |||
4828 | nv->exchange_count = __constant_cpu_to_le16(0xFFFF); | ||
4829 | |||
4830 | /* Enable target mode */ | ||
4831 | nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); | ||
4832 | |||
4833 | /* Disable ini mode, if requested */ | ||
4834 | if (!qla_ini_mode_enabled(vha)) | ||
4835 | nv->firmware_options_1 |= | ||
4836 | __constant_cpu_to_le32(BIT_5); | ||
4837 | |||
4838 | /* Disable Full Login after LIP */ | ||
4839 | nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); | ||
4840 | /* Enable initial LIP */ | ||
4841 | nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); | ||
4842 | /* Enable FC tapes support */ | ||
4843 | nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); | ||
4844 | /* Disable Full Login after LIP */ | ||
4845 | nv->host_p &= __constant_cpu_to_le32(~BIT_10); | ||
4846 | /* Enable target PRLI control */ | ||
4847 | nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); | ||
4848 | } else { | ||
4849 | if (ha->tgt.saved_set) { | ||
4850 | nv->exchange_count = ha->tgt.saved_exchange_count; | ||
4851 | nv->firmware_options_1 = | ||
4852 | ha->tgt.saved_firmware_options_1; | ||
4853 | nv->firmware_options_2 = | ||
4854 | ha->tgt.saved_firmware_options_2; | ||
4855 | nv->firmware_options_3 = | ||
4856 | ha->tgt.saved_firmware_options_3; | ||
4857 | } | ||
4858 | return; | ||
4859 | } | ||
4860 | |||
4861 | /* out-of-order frames reassembly */ | ||
4862 | nv->firmware_options_3 |= BIT_6|BIT_9; | ||
4863 | |||
4864 | if (ha->tgt.enable_class_2) { | ||
4865 | if (vha->flags.init_done) | ||
4866 | fc_host_supported_classes(vha->host) = | ||
4867 | FC_COS_CLASS2 | FC_COS_CLASS3; | ||
4868 | |||
4869 | nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); | ||
4870 | } else { | ||
4871 | if (vha->flags.init_done) | ||
4872 | fc_host_supported_classes(vha->host) = FC_COS_CLASS3; | ||
4873 | |||
4874 | nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); | ||
4875 | } | ||
4876 | } | ||
4877 | |||
4878 | void | ||
4879 | qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, | ||
4880 | struct init_cb_81xx *icb) | ||
4881 | { | ||
4882 | struct qla_hw_data *ha = vha->hw; | ||
4883 | |||
4884 | if (!QLA_TGT_MODE_ENABLED()) | ||
4885 | return; | ||
4886 | |||
4887 | if (ha->tgt.node_name_set) { | ||
4888 | memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); | ||
4889 | icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); | ||
4890 | } | ||
4891 | } | ||
4892 | |||
4893 | void | ||
4894 | qlt_83xx_iospace_config(struct qla_hw_data *ha) | ||
4895 | { | ||
4896 | if (!QLA_TGT_MODE_ENABLED()) | ||
4897 | return; | ||
4898 | |||
4899 | ha->msix_count += 1; /* For ATIO Q */ | ||
4900 | } | ||
4901 | |||
4799 | int | 4902 | int |
4800 | qlt_24xx_process_response_error(struct scsi_qla_host *vha, | 4903 | qlt_24xx_process_response_error(struct scsi_qla_host *vha, |
4801 | struct sts_entry_24xx *pkt) | 4904 | struct sts_entry_24xx *pkt) |
@@ -4828,11 +4931,41 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) | |||
4828 | if (!QLA_TGT_MODE_ENABLED()) | 4931 | if (!QLA_TGT_MODE_ENABLED()) |
4829 | return; | 4932 | return; |
4830 | 4933 | ||
4934 | if (ha->mqenable || IS_QLA83XX(ha)) { | ||
4935 | ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; | ||
4936 | ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; | ||
4937 | } else { | ||
4938 | ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; | ||
4939 | ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; | ||
4940 | } | ||
4941 | |||
4831 | mutex_init(&ha->tgt.tgt_mutex); | 4942 | mutex_init(&ha->tgt.tgt_mutex); |
4832 | mutex_init(&ha->tgt.tgt_host_action_mutex); | 4943 | mutex_init(&ha->tgt.tgt_host_action_mutex); |
4833 | qlt_clear_mode(base_vha); | 4944 | qlt_clear_mode(base_vha); |
4834 | } | 4945 | } |
4835 | 4946 | ||
4947 | irqreturn_t | ||
4948 | qla83xx_msix_atio_q(int irq, void *dev_id) | ||
4949 | { | ||
4950 | struct rsp_que *rsp; | ||
4951 | scsi_qla_host_t *vha; | ||
4952 | struct qla_hw_data *ha; | ||
4953 | unsigned long flags; | ||
4954 | |||
4955 | rsp = (struct rsp_que *) dev_id; | ||
4956 | ha = rsp->hw; | ||
4957 | vha = pci_get_drvdata(ha->pdev); | ||
4958 | |||
4959 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
4960 | |||
4961 | qlt_24xx_process_atio_queue(vha); | ||
4962 | qla24xx_process_response_queue(vha, rsp); | ||
4963 | |||
4964 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4965 | |||
4966 | return IRQ_HANDLED; | ||
4967 | } | ||
4968 | |||
4836 | int | 4969 | int |
4837 | qlt_mem_alloc(struct qla_hw_data *ha) | 4970 | qlt_mem_alloc(struct qla_hw_data *ha) |
4838 | { | 4971 | { |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index bad749561ec2..ff9ccb9fd036 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
@@ -60,8 +60,9 @@ | |||
60 | * multi-complete should come to the tgt driver or be handled there by qla2xxx | 60 | * multi-complete should come to the tgt driver or be handled there by qla2xxx |
61 | */ | 61 | */ |
62 | #define CTIO_COMPLETION_HANDLE_MARK BIT_29 | 62 | #define CTIO_COMPLETION_HANDLE_MARK BIT_29 |
63 | #if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS) | 63 | #if (CTIO_COMPLETION_HANDLE_MARK <= DEFAULT_OUTSTANDING_COMMANDS) |
64 | #error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS" | 64 | #error "CTIO_COMPLETION_HANDLE_MARK not larger than " |
65 | "DEFAULT_OUTSTANDING_COMMANDS" | ||
65 | #endif | 66 | #endif |
66 | #define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK) | 67 | #define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK) |
67 | 68 | ||
@@ -161,7 +162,7 @@ struct imm_ntfy_from_isp { | |||
161 | uint16_t srr_rx_id; | 162 | uint16_t srr_rx_id; |
162 | uint16_t status; | 163 | uint16_t status; |
163 | uint8_t status_subcode; | 164 | uint8_t status_subcode; |
164 | uint8_t reserved_3; | 165 | uint8_t fw_handle; |
165 | uint32_t exchange_address; | 166 | uint32_t exchange_address; |
166 | uint32_t srr_rel_offs; | 167 | uint32_t srr_rel_offs; |
167 | uint16_t srr_ui; | 168 | uint16_t srr_ui; |
@@ -217,7 +218,7 @@ struct nack_to_isp { | |||
217 | uint16_t srr_rx_id; | 218 | uint16_t srr_rx_id; |
218 | uint16_t status; | 219 | uint16_t status; |
219 | uint8_t status_subcode; | 220 | uint8_t status_subcode; |
220 | uint8_t reserved_3; | 221 | uint8_t fw_handle; |
221 | uint32_t exchange_address; | 222 | uint32_t exchange_address; |
222 | uint32_t srr_rel_offs; | 223 | uint32_t srr_rel_offs; |
223 | uint16_t srr_ui; | 224 | uint16_t srr_ui; |
@@ -948,6 +949,7 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int); | |||
948 | * is not set. Right now, ha value is ignored. | 949 | * is not set. Right now, ha value is ignored. |
949 | */ | 950 | */ |
950 | #define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED) | 951 | #define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED) |
952 | extern int ql2x_ini_mode; | ||
951 | 953 | ||
952 | static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha) | 954 | static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha) |
953 | { | 955 | { |
@@ -985,12 +987,15 @@ extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *); | |||
985 | extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *); | 987 | extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *); |
986 | extern void qlt_init_atio_q_entries(struct scsi_qla_host *); | 988 | extern void qlt_init_atio_q_entries(struct scsi_qla_host *); |
987 | extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *); | 989 | extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *); |
988 | extern void qlt_24xx_config_rings(struct scsi_qla_host *, | 990 | extern void qlt_24xx_config_rings(struct scsi_qla_host *); |
989 | device_reg_t __iomem *); | ||
990 | extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *, | 991 | extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *, |
991 | struct nvram_24xx *); | 992 | struct nvram_24xx *); |
992 | extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *, | 993 | extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *, |
993 | struct init_cb_24xx *); | 994 | struct init_cb_24xx *); |
995 | extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *, | ||
996 | struct init_cb_81xx *); | ||
997 | extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *, | ||
998 | struct nvram_81xx *); | ||
994 | extern int qlt_24xx_process_response_error(struct scsi_qla_host *, | 999 | extern int qlt_24xx_process_response_error(struct scsi_qla_host *, |
995 | struct sts_entry_24xx *); | 1000 | struct sts_entry_24xx *); |
996 | extern void qlt_modify_vp_config(struct scsi_qla_host *, | 1001 | extern void qlt_modify_vp_config(struct scsi_qla_host *, |
@@ -1000,5 +1005,7 @@ extern int qlt_mem_alloc(struct qla_hw_data *); | |||
1000 | extern void qlt_mem_free(struct qla_hw_data *); | 1005 | extern void qlt_mem_free(struct qla_hw_data *); |
1001 | extern void qlt_stop_phase1(struct qla_tgt *); | 1006 | extern void qlt_stop_phase1(struct qla_tgt *); |
1002 | extern void qlt_stop_phase2(struct qla_tgt *); | 1007 | extern void qlt_stop_phase2(struct qla_tgt *); |
1008 | extern irqreturn_t qla83xx_msix_atio_q(int, void *); | ||
1009 | extern void qlt_83xx_iospace_config(struct qla_hw_data *); | ||
1003 | 1010 | ||
1004 | #endif /* __QLA_TARGET_H */ | 1011 | #endif /* __QLA_TARGET_H */ |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 49697ca41e78..2b6e478d9e33 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2013 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 81e738d61ec0..160d33697216 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
@@ -1420,10 +1420,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password, | |||
1420 | dma_addr_t chap_dma; | 1420 | dma_addr_t chap_dma; |
1421 | 1421 | ||
1422 | chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); | 1422 | chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); |
1423 | if (chap_table == NULL) { | 1423 | if (chap_table == NULL) |
1424 | ret = -ENOMEM; | 1424 | return -ENOMEM; |
1425 | goto exit_get_chap; | ||
1426 | } | ||
1427 | 1425 | ||
1428 | chap_size = sizeof(struct ql4_chap_table); | 1426 | chap_size = sizeof(struct ql4_chap_table); |
1429 | memset(chap_table, 0, chap_size); | 1427 | memset(chap_table, 0, chap_size); |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 270b3cf6f372..16a3a0cc9672 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
@@ -201,6 +201,7 @@ enum storvsc_request_type { | |||
201 | #define SRB_STATUS_AUTOSENSE_VALID 0x80 | 201 | #define SRB_STATUS_AUTOSENSE_VALID 0x80 |
202 | #define SRB_STATUS_INVALID_LUN 0x20 | 202 | #define SRB_STATUS_INVALID_LUN 0x20 |
203 | #define SRB_STATUS_SUCCESS 0x01 | 203 | #define SRB_STATUS_SUCCESS 0x01 |
204 | #define SRB_STATUS_ABORTED 0x02 | ||
204 | #define SRB_STATUS_ERROR 0x04 | 205 | #define SRB_STATUS_ERROR 0x04 |
205 | 206 | ||
206 | /* | 207 | /* |
@@ -295,6 +296,25 @@ struct storvsc_scan_work { | |||
295 | uint lun; | 296 | uint lun; |
296 | }; | 297 | }; |
297 | 298 | ||
299 | static void storvsc_device_scan(struct work_struct *work) | ||
300 | { | ||
301 | struct storvsc_scan_work *wrk; | ||
302 | uint lun; | ||
303 | struct scsi_device *sdev; | ||
304 | |||
305 | wrk = container_of(work, struct storvsc_scan_work, work); | ||
306 | lun = wrk->lun; | ||
307 | |||
308 | sdev = scsi_device_lookup(wrk->host, 0, 0, lun); | ||
309 | if (!sdev) | ||
310 | goto done; | ||
311 | scsi_rescan_device(&sdev->sdev_gendev); | ||
312 | scsi_device_put(sdev); | ||
313 | |||
314 | done: | ||
315 | kfree(wrk); | ||
316 | } | ||
317 | |||
298 | static void storvsc_bus_scan(struct work_struct *work) | 318 | static void storvsc_bus_scan(struct work_struct *work) |
299 | { | 319 | { |
300 | struct storvsc_scan_work *wrk; | 320 | struct storvsc_scan_work *wrk; |
@@ -467,6 +487,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl, | |||
467 | if (!bounce_sgl) | 487 | if (!bounce_sgl) |
468 | return NULL; | 488 | return NULL; |
469 | 489 | ||
490 | sg_init_table(bounce_sgl, num_pages); | ||
470 | for (i = 0; i < num_pages; i++) { | 491 | for (i = 0; i < num_pages; i++) { |
471 | page_buf = alloc_page(GFP_ATOMIC); | 492 | page_buf = alloc_page(GFP_ATOMIC); |
472 | if (!page_buf) | 493 | if (!page_buf) |
@@ -760,6 +781,66 @@ cleanup: | |||
760 | return ret; | 781 | return ret; |
761 | } | 782 | } |
762 | 783 | ||
784 | static void storvsc_handle_error(struct vmscsi_request *vm_srb, | ||
785 | struct scsi_cmnd *scmnd, | ||
786 | struct Scsi_Host *host, | ||
787 | u8 asc, u8 ascq) | ||
788 | { | ||
789 | struct storvsc_scan_work *wrk; | ||
790 | void (*process_err_fn)(struct work_struct *work); | ||
791 | bool do_work = false; | ||
792 | |||
793 | switch (vm_srb->srb_status) { | ||
794 | case SRB_STATUS_ERROR: | ||
795 | /* | ||
796 | * If there is an error; offline the device since all | ||
797 | * error recovery strategies would have already been | ||
798 | * deployed on the host side. However, if the command | ||
799 | * were a pass-through command deal with it appropriately. | ||
800 | */ | ||
801 | switch (scmnd->cmnd[0]) { | ||
802 | case ATA_16: | ||
803 | case ATA_12: | ||
804 | set_host_byte(scmnd, DID_PASSTHROUGH); | ||
805 | break; | ||
806 | default: | ||
807 | set_host_byte(scmnd, DID_TARGET_FAILURE); | ||
808 | } | ||
809 | break; | ||
810 | case SRB_STATUS_INVALID_LUN: | ||
811 | do_work = true; | ||
812 | process_err_fn = storvsc_remove_lun; | ||
813 | break; | ||
814 | case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID): | ||
815 | if ((asc == 0x2a) && (ascq == 0x9)) { | ||
816 | do_work = true; | ||
817 | process_err_fn = storvsc_device_scan; | ||
818 | /* | ||
819 | * Retry the I/O that trigerred this. | ||
820 | */ | ||
821 | set_host_byte(scmnd, DID_REQUEUE); | ||
822 | } | ||
823 | break; | ||
824 | } | ||
825 | |||
826 | if (!do_work) | ||
827 | return; | ||
828 | |||
829 | /* | ||
830 | * We need to schedule work to process this error; schedule it. | ||
831 | */ | ||
832 | wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC); | ||
833 | if (!wrk) { | ||
834 | set_host_byte(scmnd, DID_TARGET_FAILURE); | ||
835 | return; | ||
836 | } | ||
837 | |||
838 | wrk->host = host; | ||
839 | wrk->lun = vm_srb->lun; | ||
840 | INIT_WORK(&wrk->work, process_err_fn); | ||
841 | schedule_work(&wrk->work); | ||
842 | } | ||
843 | |||
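The dispatch above reduces to a small mapping from SRB status (plus autosense data) to a recovery action: SRB_STATUS_ERROR offlines the device unless the command was an ATA pass-through, SRB_STATUS_INVALID_LUN schedules LUN removal, and an aborted request with valid autosense reporting ASC 0x2a/ASCQ 0x09 (capacity data has changed) schedules a rescan and requeues the I/O. A compact sketch of that classification, with a hypothetical enum and function name and the workqueue plumbing left out:

    /* Illustrative classification only; the enum and function names are
     * hypothetical and do not appear in the driver. */
    enum storvsc_err_action {
            ERR_ACT_NONE,
            ERR_ACT_OFFLINE_OR_PASSTHROUGH, /* SRB_STATUS_ERROR */
            ERR_ACT_REMOVE_LUN,             /* SRB_STATUS_INVALID_LUN */
            ERR_ACT_RESCAN_AND_REQUEUE,     /* capacity change reported */
    };

    static enum storvsc_err_action storvsc_classify(u8 srb_status, u8 asc, u8 ascq)
    {
            switch (srb_status) {
            case SRB_STATUS_ERROR:
                    return ERR_ACT_OFFLINE_OR_PASSTHROUGH;
            case SRB_STATUS_INVALID_LUN:
                    return ERR_ACT_REMOVE_LUN;
            case SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID:
                    if (asc == 0x2a && ascq == 0x09)
                            return ERR_ACT_RESCAN_AND_REQUEUE;
                    break;
            }
            return ERR_ACT_NONE;
    }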
763 | 844 | ||
764 | static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) | 845 | static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) |
765 | { | 846 | { |
@@ -768,8 +849,13 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) | |||
768 | void (*scsi_done_fn)(struct scsi_cmnd *); | 849 | void (*scsi_done_fn)(struct scsi_cmnd *); |
769 | struct scsi_sense_hdr sense_hdr; | 850 | struct scsi_sense_hdr sense_hdr; |
770 | struct vmscsi_request *vm_srb; | 851 | struct vmscsi_request *vm_srb; |
771 | struct storvsc_scan_work *wrk; | ||
772 | struct stor_mem_pools *memp = scmnd->device->hostdata; | 852 | struct stor_mem_pools *memp = scmnd->device->hostdata; |
853 | struct Scsi_Host *host; | ||
854 | struct storvsc_device *stor_dev; | ||
855 | struct hv_device *dev = host_dev->dev; | ||
856 | |||
857 | stor_dev = get_in_stor_device(dev); | ||
858 | host = stor_dev->host; | ||
773 | 859 | ||
774 | vm_srb = &cmd_request->vstor_packet.vm_srb; | 860 | vm_srb = &cmd_request->vstor_packet.vm_srb; |
775 | if (cmd_request->bounce_sgl_count) { | 861 | if (cmd_request->bounce_sgl_count) { |
@@ -782,55 +868,18 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) | |||
782 | cmd_request->bounce_sgl_count); | 868 | cmd_request->bounce_sgl_count); |
783 | } | 869 | } |
784 | 870 | ||
785 | /* | ||
786 | * If there is an error; offline the device since all | ||
787 | * error recovery strategies would have already been | ||
788 | * deployed on the host side. However, if the command | ||
789 | * were a pass-through command deal with it appropriately. | ||
790 | */ | ||
791 | scmnd->result = vm_srb->scsi_status; | 871 | scmnd->result = vm_srb->scsi_status; |
792 | 872 | ||
793 | if (vm_srb->srb_status == SRB_STATUS_ERROR) { | ||
794 | switch (scmnd->cmnd[0]) { | ||
795 | case ATA_16: | ||
796 | case ATA_12: | ||
797 | set_host_byte(scmnd, DID_PASSTHROUGH); | ||
798 | break; | ||
799 | default: | ||
800 | set_host_byte(scmnd, DID_TARGET_FAILURE); | ||
801 | } | ||
802 | } | ||
803 | |||
804 | |||
805 | /* | ||
806 | * If the LUN is invalid; remove the device. | ||
807 | */ | ||
808 | if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) { | ||
809 | struct storvsc_device *stor_dev; | ||
810 | struct hv_device *dev = host_dev->dev; | ||
811 | struct Scsi_Host *host; | ||
812 | |||
813 | stor_dev = get_in_stor_device(dev); | ||
814 | host = stor_dev->host; | ||
815 | |||
816 | wrk = kmalloc(sizeof(struct storvsc_scan_work), | ||
817 | GFP_ATOMIC); | ||
818 | if (!wrk) { | ||
819 | scmnd->result = DID_TARGET_FAILURE << 16; | ||
820 | } else { | ||
821 | wrk->host = host; | ||
822 | wrk->lun = vm_srb->lun; | ||
823 | INIT_WORK(&wrk->work, storvsc_remove_lun); | ||
824 | schedule_work(&wrk->work); | ||
825 | } | ||
826 | } | ||
827 | |||
828 | if (scmnd->result) { | 873 | if (scmnd->result) { |
829 | if (scsi_normalize_sense(scmnd->sense_buffer, | 874 | if (scsi_normalize_sense(scmnd->sense_buffer, |
830 | SCSI_SENSE_BUFFERSIZE, &sense_hdr)) | 875 | SCSI_SENSE_BUFFERSIZE, &sense_hdr)) |
831 | scsi_print_sense_hdr("storvsc", &sense_hdr); | 876 | scsi_print_sense_hdr("storvsc", &sense_hdr); |
832 | } | 877 | } |
833 | 878 | ||
879 | if (vm_srb->srb_status != SRB_STATUS_SUCCESS) | ||
880 | storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, | ||
881 | sense_hdr.ascq); | ||
882 | |||
834 | scsi_set_resid(scmnd, | 883 | scsi_set_resid(scmnd, |
835 | cmd_request->data_buffer.len - | 884 | cmd_request->data_buffer.len - |
836 | vm_srb->data_transfer_length); | 885 | vm_srb->data_transfer_length); |
@@ -1155,6 +1204,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice) | |||
1155 | 1204 | ||
1156 | blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY); | 1205 | blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY); |
1157 | 1206 | ||
1207 | sdevice->no_write_same = 1; | ||
1208 | |||
1158 | return 0; | 1209 | return 0; |
1159 | } | 1210 | } |
1160 | 1211 | ||
@@ -1237,6 +1288,8 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd) | |||
1237 | u8 scsi_op = scmnd->cmnd[0]; | 1288 | u8 scsi_op = scmnd->cmnd[0]; |
1238 | 1289 | ||
1239 | switch (scsi_op) { | 1290 | switch (scsi_op) { |
1291 | /* the host does not handle WRITE_SAME, log accidental usage */ | ||
1292 | case WRITE_SAME: | ||
1240 | /* | 1293 | /* |
1241 | * smartd sends this command and the host does not handle | 1294 | * smartd sends this command and the host does not handle |
1242 | * this. So, don't send it. | 1295 | * this. So, don't send it. |
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 8f27f9d6f91d..0371047c5922 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig | |||
@@ -2,48 +2,58 @@ | |||
2 | # Kernel configuration file for the UFS Host Controller | 2 | # Kernel configuration file for the UFS Host Controller |
3 | # | 3 | # |
4 | # This code is based on drivers/scsi/ufs/Kconfig | 4 | # This code is based on drivers/scsi/ufs/Kconfig |
5 | # Copyright (C) 2011 Samsung Samsung India Software Operations | 5 | # Copyright (C) 2011-2013 Samsung India Software Operations |
6 | # | ||
7 | # Authors: | ||
8 | # Santosh Yaraganavi <santosh.sy@samsung.com> | ||
9 | # Vinayak Holikatti <h.vinayak@samsung.com> | ||
6 | # | 10 | # |
7 | # Santosh Yaraganavi <santosh.sy@samsung.com> | ||
8 | # Vinayak Holikatti <h.vinayak@samsung.com> | ||
9 | |||
10 | # This program is free software; you can redistribute it and/or | 11 | # This program is free software; you can redistribute it and/or |
11 | # modify it under the terms of the GNU General Public License | 12 | # modify it under the terms of the GNU General Public License |
12 | # as published by the Free Software Foundation; either version 2 | 13 | # as published by the Free Software Foundation; either version 2 |
13 | # of the License, or (at your option) any later version. | 14 | # of the License, or (at your option) any later version. |
14 | 15 | # See the COPYING file in the top-level directory or visit | |
16 | # <http://www.gnu.org/licenses/gpl-2.0.html> | ||
17 | # | ||
15 | # This program is distributed in the hope that it will be useful, | 18 | # This program is distributed in the hope that it will be useful, |
16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
18 | # GNU General Public License for more details. | 21 | # GNU General Public License for more details. |
22 | # | ||
23 | # This program is provided "AS IS" and "WITH ALL FAULTS" and | ||
24 | # without warranty of any kind. You are solely responsible for | ||
25 | # determining the appropriateness of using and distributing | ||
26 | # the program and assume all risks associated with your exercise | ||
27 | # of rights with respect to the program, including but not limited | ||
28 | # to infringement of third party rights, the risks and costs of | ||
29 | # program errors, damage to or loss of data, programs or equipment, | ||
30 | # and unavailability or interruption of operations. Under no | ||
31 | # circumstances will the contributor of this Program be liable for | ||
32 | # any damages of any kind arising from your use or distribution of | ||
33 | # this program. | ||
19 | 34 | ||
20 | # NO WARRANTY | 35 | config SCSI_UFSHCD |
21 | # THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | 36 | tristate "Universal Flash Storage Controller Driver Core" |
22 | # CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | 37 | depends on SCSI |
23 | # LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | 38 | ---help--- |
24 | # MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | 39 | This selects the support for UFS devices in Linux, say Y and make |
25 | # solely responsible for determining the appropriateness of using and | 40 | sure that you know the name of your UFS host adapter (the card |
26 | # distributing the Program and assumes all risks associated with its | 41 | inside your computer that "speaks" the UFS protocol, also |
27 | # exercise of rights under this Agreement, including but not limited to | 42 | called UFS Host Controller), because you will be asked for it. |
28 | # the risks and costs of program errors, damage to or loss of data, | 43 | The module will be called ufshcd. |
29 | # programs or equipment, and unavailability or interruption of operations. | ||
30 | |||
31 | # DISCLAIMER OF LIABILITY | ||
32 | # NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | ||
33 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
34 | # DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | ||
35 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | ||
36 | # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
37 | # USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | ||
38 | # HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | ||
39 | 44 | ||
40 | # You should have received a copy of the GNU General Public License | 45 | To compile this driver as a module, choose M here and read |
41 | # along with this program; if not, write to the Free Software | 46 | <file:Documentation/scsi/ufs.txt>. |
42 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, | 47 | However, do not compile this as a module if your root file system |
43 | # USA. | 48 | (the one containing the directory /) is located on a UFS device. |
44 | 49 | ||
45 | config SCSI_UFSHCD | 50 | config SCSI_UFSHCD_PCI |
46 | tristate "Universal Flash Storage host controller driver" | 51 | tristate "PCI bus based UFS Controller support" |
47 | depends on PCI && SCSI | 52 | depends on SCSI_UFSHCD && PCI |
48 | ---help--- | 53 | ---help--- |
49 | This is a generic driver which supports PCIe UFS Host controllers. | 54 | This selects the PCI UFS Host Controller Interface. Select this if |
55 | you have UFS Host Controller with PCI Interface. | ||
56 | |||
57 | If you have a controller with this interface, say Y or M here. | ||
58 | |||
59 | If unsure, say N. | ||
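The net effect of this Kconfig rework is a split into a bus-agnostic core option (SCSI_UFSHCD) and a PCI glue option (SCSI_UFSHCD_PCI) that depends on it; the Makefile hunk below adds the matching ufshcd-pci.o object. A configuration building both as modules would contain, for example (illustrative fragment only, not part of the patch):

    CONFIG_SCSI_UFSHCD=m
    CONFIG_SCSI_UFSHCD_PCI=m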
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index adf7895a6a91..9eda0dfbd6df 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile | |||
@@ -1,2 +1,3 @@ | |||
1 | # UFSHCD makefile | 1 | # UFSHCD makefile |
2 | obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o | 2 | obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o |
3 | obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o | ||
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index b207529f8d54..139bc0647b41 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h | |||
@@ -2,45 +2,35 @@ | |||
2 | * Universal Flash Storage Host controller driver | 2 | * Universal Flash Storage Host controller driver |
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/ufs/ufs.h | 4 | * This code is based on drivers/scsi/ufs/ufs.h |
5 | * Copyright (C) 2011-2012 Samsung India Software Operations | 5 | * Copyright (C) 2011-2013 Samsung India Software Operations |
6 | * | 6 | * |
7 | * Santosh Yaraganavi <santosh.sy@samsung.com> | 7 | * Authors: |
8 | * Vinayak Holikatti <h.vinayak@samsung.com> | 8 | * Santosh Yaraganavi <santosh.sy@samsung.com> |
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version 2 | 13 | * as published by the Free Software Foundation; either version 2 |
13 | * of the License, or (at your option) any later version. | 14 | * of the License, or (at your option) any later version. |
15 | * See the COPYING file in the top-level directory or visit | ||
16 | * <http://www.gnu.org/licenses/gpl-2.0.html> | ||
14 | * | 17 | * |
15 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
18 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
19 | * | 22 | * |
20 | * NO WARRANTY | 23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and |
21 | * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | 24 | * without warranty of any kind. You are solely responsible for |
22 | * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | 25 | * determining the appropriateness of using and distributing |
23 | * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | 26 | * the program and assume all risks associated with your exercise |
24 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | 27 | * of rights with respect to the program, including but not limited |
25 | * solely responsible for determining the appropriateness of using and | 28 | * to infringement of third party rights, the risks and costs of |
26 | * distributing the Program and assumes all risks associated with its | 29 | * program errors, damage to or loss of data, programs or equipment, |
27 | * exercise of rights under this Agreement, including but not limited to | 30 | * and unavailability or interruption of operations. Under no |
28 | * the risks and costs of program errors, damage to or loss of data, | 31 | * circumstances will the contributor of this Program be liable for |
29 | * programs or equipment, and unavailability or interruption of operations. | 32 | * any damages of any kind arising from your use or distribution of |
30 | 33 | * this program. | |
31 | * DISCLAIMER OF LIABILITY | ||
32 | * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | ||
33 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
34 | * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | ||
35 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | ||
36 | * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
37 | * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | ||
38 | * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | ||
39 | |||
40 | * You should have received a copy of the GNU General Public License | ||
41 | * along with this program; if not, write to the Free Software | ||
42 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, | ||
43 | * USA. | ||
44 | */ | 34 | */ |
45 | 35 | ||
46 | #ifndef _UFS_H | 36 | #ifndef _UFS_H |
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c new file mode 100644 index 000000000000..5cb1d75f5868 --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-pci.c | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * Universal Flash Storage Host controller PCI glue driver | ||
3 | * | ||
4 | * This code is based on drivers/scsi/ufs/ufshcd-pci.c | ||
5 | * Copyright (C) 2011-2013 Samsung India Software Operations | ||
6 | * | ||
7 | * Authors: | ||
8 | * Santosh Yaraganavi <santosh.sy@samsung.com> | ||
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version 2 | ||
14 | * of the License, or (at your option) any later version. | ||
15 | * See the COPYING file in the top-level directory or visit | ||
16 | * <http://www.gnu.org/licenses/gpl-2.0.html> | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and | ||
24 | * without warranty of any kind. You are solely responsible for | ||
25 | * determining the appropriateness of using and distributing | ||
26 | * the program and assume all risks associated with your exercise | ||
27 | * of rights with respect to the program, including but not limited | ||
28 | * to infringement of third party rights, the risks and costs of | ||
29 | * program errors, damage to or loss of data, programs or equipment, | ||
30 | * and unavailability or interruption of operations. Under no | ||
31 | * circumstances will the contributor of this Program be liable for | ||
32 | * any damages of any kind arising from your use or distribution of | ||
33 | * this program. | ||
34 | */ | ||
35 | |||
36 | #include "ufshcd.h" | ||
37 | #include <linux/pci.h> | ||
38 | |||
39 | #ifdef CONFIG_PM | ||
40 | /** | ||
41 | * ufshcd_pci_suspend - suspend power management function | ||
42 | * @pdev: pointer to PCI device handle | ||
43 | * @state: power state | ||
44 | * | ||
45 | * Returns -ENOSYS | ||
46 | */ | ||
47 | static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
48 | { | ||
49 | /* | ||
50 | * TODO: | ||
51 | * 1. Call ufshcd_suspend | ||
52 | * 2. Do bus specific power management | ||
53 | */ | ||
54 | |||
55 | return -ENOSYS; | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * ufshcd_pci_resume - resume power management function | ||
60 | * @pdev: pointer to PCI device handle | ||
61 | * | ||
62 | * Returns -ENOSYS | ||
63 | */ | ||
64 | static int ufshcd_pci_resume(struct pci_dev *pdev) | ||
65 | { | ||
66 | /* | ||
67 | * TODO: | ||
68 | * 1. Call ufshcd_resume. | ||
69 | * 2. Do bus specific wake up | ||
70 | */ | ||
71 | |||
72 | return -ENOSYS; | ||
73 | } | ||
74 | #endif /* CONFIG_PM */ | ||
75 | |||
76 | /** | ||
77 | * ufshcd_pci_shutdown - main function to put the controller in reset state | ||
78 | * @pdev: pointer to PCI device handle | ||
79 | */ | ||
80 | static void ufshcd_pci_shutdown(struct pci_dev *pdev) | ||
81 | { | ||
82 | ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev)); | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space | ||
87 | * data structure memory | ||
88 | * @pdev - pointer to PCI handle | ||
89 | */ | ||
90 | static void ufshcd_pci_remove(struct pci_dev *pdev) | ||
91 | { | ||
92 | struct ufs_hba *hba = pci_get_drvdata(pdev); | ||
93 | |||
94 | disable_irq(pdev->irq); | ||
95 | free_irq(pdev->irq, hba); | ||
96 | ufshcd_remove(hba); | ||
97 | pci_release_regions(pdev); | ||
98 | pci_set_drvdata(pdev, NULL); | ||
99 | pci_clear_master(pdev); | ||
100 | pci_disable_device(pdev); | ||
101 | } | ||
102 | |||
103 | /** | ||
104 | * ufshcd_set_dma_mask - Set dma mask based on the controller | ||
105 | * addressing capability | ||
106 | * @pdev: PCI device structure | ||
107 | * | ||
108 | * Returns 0 for success, non-zero for failure | ||
109 | */ | ||
110 | static int ufshcd_set_dma_mask(struct pci_dev *pdev) | ||
111 | { | ||
112 | int err; | ||
113 | |||
114 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) | ||
115 | && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) | ||
116 | return 0; | ||
117 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
118 | if (!err) | ||
119 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
120 | return err; | ||
121 | } | ||
122 | |||
123 | /** | ||
124 | * ufshcd_pci_probe - probe routine of the driver | ||
125 | * @pdev: pointer to PCI device handle | ||
126 | * @id: PCI device id | ||
127 | * | ||
128 | * Returns 0 on success, non-zero value on failure | ||
129 | */ | ||
130 | static int | ||
131 | ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
132 | { | ||
133 | struct ufs_hba *hba; | ||
134 | void __iomem *mmio_base; | ||
135 | int err; | ||
136 | |||
137 | err = pci_enable_device(pdev); | ||
138 | if (err) { | ||
139 | dev_err(&pdev->dev, "pci_enable_device failed\n"); | ||
140 | goto out_error; | ||
141 | } | ||
142 | |||
143 | pci_set_master(pdev); | ||
144 | |||
145 | |||
146 | err = pci_request_regions(pdev, UFSHCD); | ||
147 | if (err < 0) { | ||
148 | dev_err(&pdev->dev, "request regions failed\n"); | ||
149 | goto out_disable; | ||
150 | } | ||
151 | |||
152 | mmio_base = pci_ioremap_bar(pdev, 0); | ||
153 | if (!mmio_base) { | ||
154 | dev_err(&pdev->dev, "memory map failed\n"); | ||
155 | err = -ENOMEM; | ||
156 | goto out_release_regions; | ||
157 | } | ||
158 | |||
159 | err = ufshcd_set_dma_mask(pdev); | ||
160 | if (err) { | ||
161 | dev_err(&pdev->dev, "set dma mask failed\n"); | ||
162 | goto out_iounmap; | ||
163 | } | ||
164 | |||
165 | err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq); | ||
166 | if (err) { | ||
167 | dev_err(&pdev->dev, "Initialization failed\n"); | ||
168 | goto out_iounmap; | ||
169 | } | ||
170 | |||
171 | pci_set_drvdata(pdev, hba); | ||
172 | |||
173 | return 0; | ||
174 | |||
175 | out_iounmap: | ||
176 | iounmap(mmio_base); | ||
177 | out_release_regions: | ||
178 | pci_release_regions(pdev); | ||
179 | out_disable: | ||
180 | pci_clear_master(pdev); | ||
181 | pci_disable_device(pdev); | ||
182 | out_error: | ||
183 | return err; | ||
184 | } | ||
185 | |||
186 | static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = { | ||
187 | { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | ||
188 | { } /* terminate list */ | ||
189 | }; | ||
190 | |||
191 | MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl); | ||
192 | |||
193 | static struct pci_driver ufshcd_pci_driver = { | ||
194 | .name = UFSHCD, | ||
195 | .id_table = ufshcd_pci_tbl, | ||
196 | .probe = ufshcd_pci_probe, | ||
197 | .remove = ufshcd_pci_remove, | ||
198 | .shutdown = ufshcd_pci_shutdown, | ||
199 | #ifdef CONFIG_PM | ||
200 | .suspend = ufshcd_pci_suspend, | ||
201 | .resume = ufshcd_pci_resume, | ||
202 | #endif | ||
203 | }; | ||
204 | |||
205 | module_pci_driver(ufshcd_pci_driver); | ||
206 | |||
207 | MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>"); | ||
208 | MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); | ||
209 | MODULE_DESCRIPTION("UFS host controller PCI glue driver"); | ||
210 | MODULE_LICENSE("GPL"); | ||
211 | MODULE_VERSION(UFSHCD_DRIVER_VERSION); | ||
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 91a4046ca9ba..60fd40c4e4c2 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -1,77 +1,39 @@ | |||
1 | /* | 1 | /* |
2 | * Universal Flash Storage Host controller driver | 2 | * Universal Flash Storage Host controller driver Core |
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/ufs/ufshcd.c | 4 | * This code is based on drivers/scsi/ufs/ufshcd.c |
5 | * Copyright (C) 2011-2012 Samsung India Software Operations | 5 | * Copyright (C) 2011-2013 Samsung India Software Operations |
6 | * | 6 | * |
7 | * Santosh Yaraganavi <santosh.sy@samsung.com> | 7 | * Authors: |
8 | * Vinayak Holikatti <h.vinayak@samsung.com> | 8 | * Santosh Yaraganavi <santosh.sy@samsung.com> |
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version 2 | 13 | * as published by the Free Software Foundation; either version 2 |
13 | * of the License, or (at your option) any later version. | 14 | * of the License, or (at your option) any later version. |
15 | * See the COPYING file in the top-level directory or visit | ||
16 | * <http://www.gnu.org/licenses/gpl-2.0.html> | ||
14 | * | 17 | * |
15 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
18 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
19 | * | 22 | * |
20 | * NO WARRANTY | 23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and |
21 | * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | 24 | * without warranty of any kind. You are solely responsible for |
22 | * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | 25 | * determining the appropriateness of using and distributing |
23 | * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | 26 | * the program and assume all risks associated with your exercise |
24 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | 27 | * of rights with respect to the program, including but not limited |
25 | * solely responsible for determining the appropriateness of using and | 28 | * to infringement of third party rights, the risks and costs of |
26 | * distributing the Program and assumes all risks associated with its | 29 | * program errors, damage to or loss of data, programs or equipment, |
27 | * exercise of rights under this Agreement, including but not limited to | 30 | * and unavailability or interruption of operations. Under no |
28 | * the risks and costs of program errors, damage to or loss of data, | 31 | * circumstances will the contributor of this Program be liable for |
29 | * programs or equipment, and unavailability or interruption of operations. | 32 | * any damages of any kind arising from your use or distribution of |
30 | 33 | * this program. | |
31 | * DISCLAIMER OF LIABILITY | ||
32 | * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | ||
33 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
34 | * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | ||
35 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | ||
36 | * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
37 | * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | ||
38 | * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | ||
39 | |||
40 | * You should have received a copy of the GNU General Public License | ||
41 | * along with this program; if not, write to the Free Software | ||
42 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, | ||
43 | * USA. | ||
44 | */ | 34 | */ |
45 | 35 | ||
46 | #include <linux/module.h> | 36 | #include "ufshcd.h" |
47 | #include <linux/kernel.h> | ||
48 | #include <linux/init.h> | ||
49 | #include <linux/pci.h> | ||
50 | #include <linux/interrupt.h> | ||
51 | #include <linux/io.h> | ||
52 | #include <linux/delay.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include <linux/spinlock.h> | ||
55 | #include <linux/workqueue.h> | ||
56 | #include <linux/errno.h> | ||
57 | #include <linux/types.h> | ||
58 | #include <linux/wait.h> | ||
59 | #include <linux/bitops.h> | ||
60 | |||
61 | #include <asm/irq.h> | ||
62 | #include <asm/byteorder.h> | ||
63 | #include <scsi/scsi.h> | ||
64 | #include <scsi/scsi_cmnd.h> | ||
65 | #include <scsi/scsi_host.h> | ||
66 | #include <scsi/scsi_tcq.h> | ||
67 | #include <scsi/scsi_dbg.h> | ||
68 | #include <scsi/scsi_eh.h> | ||
69 | |||
70 | #include "ufs.h" | ||
71 | #include "ufshci.h" | ||
72 | |||
73 | #define UFSHCD "ufshcd" | ||
74 | #define UFSHCD_DRIVER_VERSION "0.1" | ||
75 | 37 | ||
76 | enum { | 38 | enum { |
77 | UFSHCD_MAX_CHANNEL = 0, | 39 | UFSHCD_MAX_CHANNEL = 0, |
@@ -102,121 +64,6 @@ enum { | |||
102 | }; | 64 | }; |
103 | 65 | ||
104 | /** | 66 | /** |
105 | * struct uic_command - UIC command structure | ||
106 | * @command: UIC command | ||
107 | * @argument1: UIC command argument 1 | ||
108 | * @argument2: UIC command argument 2 | ||
109 | * @argument3: UIC command argument 3 | ||
110 | * @cmd_active: Indicate if UIC command is outstanding | ||
111 | * @result: UIC command result | ||
112 | */ | ||
113 | struct uic_command { | ||
114 | u32 command; | ||
115 | u32 argument1; | ||
116 | u32 argument2; | ||
117 | u32 argument3; | ||
118 | int cmd_active; | ||
119 | int result; | ||
120 | }; | ||
121 | |||
122 | /** | ||
123 | * struct ufs_hba - per adapter private structure | ||
124 | * @mmio_base: UFSHCI base register address | ||
125 | * @ucdl_base_addr: UFS Command Descriptor base address | ||
126 | * @utrdl_base_addr: UTP Transfer Request Descriptor base address | ||
127 | * @utmrdl_base_addr: UTP Task Management Descriptor base address | ||
128 | * @ucdl_dma_addr: UFS Command Descriptor DMA address | ||
129 | * @utrdl_dma_addr: UTRDL DMA address | ||
130 | * @utmrdl_dma_addr: UTMRDL DMA address | ||
131 | * @host: Scsi_Host instance of the driver | ||
132 | * @pdev: PCI device handle | ||
133 | * @lrb: local reference block | ||
134 | * @outstanding_tasks: Bits representing outstanding task requests | ||
135 | * @outstanding_reqs: Bits representing outstanding transfer requests | ||
136 | * @capabilities: UFS Controller Capabilities | ||
137 | * @nutrs: Transfer Request Queue depth supported by controller | ||
138 | * @nutmrs: Task Management Queue depth supported by controller | ||
139 | * @active_uic_cmd: handle of active UIC command | ||
140 | * @ufshcd_tm_wait_queue: wait queue for task management | ||
141 | * @tm_condition: condition variable for task management | ||
142 | * @ufshcd_state: UFSHCD states | ||
143 | * @int_enable_mask: Interrupt Mask Bits | ||
144 | * @uic_workq: Work queue for UIC completion handling | ||
145 | * @feh_workq: Work queue for fatal controller error handling | ||
146 | * @errors: HBA errors | ||
147 | */ | ||
148 | struct ufs_hba { | ||
149 | void __iomem *mmio_base; | ||
150 | |||
151 | /* Virtual memory reference */ | ||
152 | struct utp_transfer_cmd_desc *ucdl_base_addr; | ||
153 | struct utp_transfer_req_desc *utrdl_base_addr; | ||
154 | struct utp_task_req_desc *utmrdl_base_addr; | ||
155 | |||
156 | /* DMA memory reference */ | ||
157 | dma_addr_t ucdl_dma_addr; | ||
158 | dma_addr_t utrdl_dma_addr; | ||
159 | dma_addr_t utmrdl_dma_addr; | ||
160 | |||
161 | struct Scsi_Host *host; | ||
162 | struct pci_dev *pdev; | ||
163 | |||
164 | struct ufshcd_lrb *lrb; | ||
165 | |||
166 | unsigned long outstanding_tasks; | ||
167 | unsigned long outstanding_reqs; | ||
168 | |||
169 | u32 capabilities; | ||
170 | int nutrs; | ||
171 | int nutmrs; | ||
172 | u32 ufs_version; | ||
173 | |||
174 | struct uic_command active_uic_cmd; | ||
175 | wait_queue_head_t ufshcd_tm_wait_queue; | ||
176 | unsigned long tm_condition; | ||
177 | |||
178 | u32 ufshcd_state; | ||
179 | u32 int_enable_mask; | ||
180 | |||
181 | /* Work Queues */ | ||
182 | struct work_struct uic_workq; | ||
183 | struct work_struct feh_workq; | ||
184 | |||
185 | /* HBA Errors */ | ||
186 | u32 errors; | ||
187 | }; | ||
188 | |||
189 | /** | ||
190 | * struct ufshcd_lrb - local reference block | ||
191 | * @utr_descriptor_ptr: UTRD address of the command | ||
192 | * @ucd_cmd_ptr: UCD address of the command | ||
193 | * @ucd_rsp_ptr: Response UPIU address for this command | ||
194 | * @ucd_prdt_ptr: PRDT address of the command | ||
195 | * @cmd: pointer to SCSI command | ||
196 | * @sense_buffer: pointer to sense buffer address of the SCSI command | ||
197 | * @sense_bufflen: Length of the sense buffer | ||
198 | * @scsi_status: SCSI status of the command | ||
199 | * @command_type: SCSI, UFS, Query. | ||
200 | * @task_tag: Task tag of the command | ||
201 | * @lun: LUN of the command | ||
202 | */ | ||
203 | struct ufshcd_lrb { | ||
204 | struct utp_transfer_req_desc *utr_descriptor_ptr; | ||
205 | struct utp_upiu_cmd *ucd_cmd_ptr; | ||
206 | struct utp_upiu_rsp *ucd_rsp_ptr; | ||
207 | struct ufshcd_sg_entry *ucd_prdt_ptr; | ||
208 | |||
209 | struct scsi_cmnd *cmd; | ||
210 | u8 *sense_buffer; | ||
211 | unsigned int sense_bufflen; | ||
212 | int scsi_status; | ||
213 | |||
214 | int command_type; | ||
215 | int task_tag; | ||
216 | unsigned int lun; | ||
217 | }; | ||
218 | |||
219 | /** | ||
220 | * ufshcd_get_ufs_version - Get the UFS version supported by the HBA | 67 | * ufshcd_get_ufs_version - Get the UFS version supported by the HBA |
221 | * @hba - Pointer to adapter instance | 68 | * @hba - Pointer to adapter instance |
222 | * | 69 | * |
@@ -335,21 +182,21 @@ static inline void ufshcd_free_hba_memory(struct ufs_hba *hba) | |||
335 | 182 | ||
336 | if (hba->utmrdl_base_addr) { | 183 | if (hba->utmrdl_base_addr) { |
337 | utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; | 184 | utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; |
338 | dma_free_coherent(&hba->pdev->dev, utmrdl_size, | 185 | dma_free_coherent(hba->dev, utmrdl_size, |
339 | hba->utmrdl_base_addr, hba->utmrdl_dma_addr); | 186 | hba->utmrdl_base_addr, hba->utmrdl_dma_addr); |
340 | } | 187 | } |
341 | 188 | ||
342 | if (hba->utrdl_base_addr) { | 189 | if (hba->utrdl_base_addr) { |
343 | utrdl_size = | 190 | utrdl_size = |
344 | (sizeof(struct utp_transfer_req_desc) * hba->nutrs); | 191 | (sizeof(struct utp_transfer_req_desc) * hba->nutrs); |
345 | dma_free_coherent(&hba->pdev->dev, utrdl_size, | 192 | dma_free_coherent(hba->dev, utrdl_size, |
346 | hba->utrdl_base_addr, hba->utrdl_dma_addr); | 193 | hba->utrdl_base_addr, hba->utrdl_dma_addr); |
347 | } | 194 | } |
348 | 195 | ||
349 | if (hba->ucdl_base_addr) { | 196 | if (hba->ucdl_base_addr) { |
350 | ucdl_size = | 197 | ucdl_size = |
351 | (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); | 198 | (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); |
352 | dma_free_coherent(&hba->pdev->dev, ucdl_size, | 199 | dma_free_coherent(hba->dev, ucdl_size, |
353 | hba->ucdl_base_addr, hba->ucdl_dma_addr); | 200 | hba->ucdl_base_addr, hba->ucdl_dma_addr); |
354 | } | 201 | } |
355 | } | 202 | } |
@@ -429,15 +276,6 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) | |||
429 | } | 276 | } |
430 | 277 | ||
431 | /** | 278 | /** |
432 | * ufshcd_hba_stop - Send controller to reset state | ||
433 | * @hba: per adapter instance | ||
434 | */ | ||
435 | static inline void ufshcd_hba_stop(struct ufs_hba *hba) | ||
436 | { | ||
437 | writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE)); | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * ufshcd_hba_start - Start controller initialization sequence | 279 | * ufshcd_hba_start - Start controller initialization sequence |
442 | * @hba: per adapter instance | 280 | * @hba: per adapter instance |
443 | */ | 281 | */ |
@@ -724,7 +562,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) | |||
724 | 562 | ||
725 | /* Allocate memory for UTP command descriptors */ | 563 | /* Allocate memory for UTP command descriptors */ |
726 | ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); | 564 | ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); |
727 | hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev, | 565 | hba->ucdl_base_addr = dma_alloc_coherent(hba->dev, |
728 | ucdl_size, | 566 | ucdl_size, |
729 | &hba->ucdl_dma_addr, | 567 | &hba->ucdl_dma_addr, |
730 | GFP_KERNEL); | 568 | GFP_KERNEL); |
@@ -737,7 +575,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) | |||
737 | */ | 575 | */ |
738 | if (!hba->ucdl_base_addr || | 576 | if (!hba->ucdl_base_addr || |
739 | WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { | 577 | WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { |
740 | dev_err(&hba->pdev->dev, | 578 | dev_err(hba->dev, |
741 | "Command Descriptor Memory allocation failed\n"); | 579 | "Command Descriptor Memory allocation failed\n"); |
742 | goto out; | 580 | goto out; |
743 | } | 581 | } |
@@ -747,13 +585,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) | |||
747 | * UFSHCI requires 1024 byte alignment of UTRD | 585 | * UFSHCI requires 1024 byte alignment of UTRD |
748 | */ | 586 | */ |
749 | utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); | 587 | utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); |
750 | hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev, | 588 | hba->utrdl_base_addr = dma_alloc_coherent(hba->dev, |
751 | utrdl_size, | 589 | utrdl_size, |
752 | &hba->utrdl_dma_addr, | 590 | &hba->utrdl_dma_addr, |
753 | GFP_KERNEL); | 591 | GFP_KERNEL); |
754 | if (!hba->utrdl_base_addr || | 592 | if (!hba->utrdl_base_addr || |
755 | WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { | 593 | WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { |
756 | dev_err(&hba->pdev->dev, | 594 | dev_err(hba->dev, |
757 | "Transfer Descriptor Memory allocation failed\n"); | 595 | "Transfer Descriptor Memory allocation failed\n"); |
758 | goto out; | 596 | goto out; |
759 | } | 597 | } |
@@ -763,13 +601,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) | |||
763 | * UFSHCI requires 1024 byte alignment of UTMRD | 601 | * UFSHCI requires 1024 byte alignment of UTMRD |
764 | */ | 602 | */ |
765 | utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; | 603 | utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; |
766 | hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev, | 604 | hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev, |
767 | utmrdl_size, | 605 | utmrdl_size, |
768 | &hba->utmrdl_dma_addr, | 606 | &hba->utmrdl_dma_addr, |
769 | GFP_KERNEL); | 607 | GFP_KERNEL); |
770 | if (!hba->utmrdl_base_addr || | 608 | if (!hba->utmrdl_base_addr || |
771 | WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { | 609 | WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { |
772 | dev_err(&hba->pdev->dev, | 610 | dev_err(hba->dev, |
773 | "Task Management Descriptor Memory allocation failed\n"); | 611 | "Task Management Descriptor Memory allocation failed\n"); |
774 | goto out; | 612 | goto out; |
775 | } | 613 | } |
@@ -777,7 +615,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) | |||
777 | /* Allocate memory for local reference block */ | 615 | /* Allocate memory for local reference block */ |
778 | hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL); | 616 | hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL); |
779 | if (!hba->lrb) { | 617 | if (!hba->lrb) { |
780 | dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n"); | 618 | dev_err(hba->dev, "LRB Memory allocation failed\n"); |
781 | goto out; | 619 | goto out; |
782 | } | 620 | } |
783 | return 0; | 621 | return 0; |
@@ -867,7 +705,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba) | |||
867 | /* check if controller is ready to accept UIC commands */ | 705 | /* check if controller is ready to accept UIC commands */ |
868 | if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) & | 706 | if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) & |
869 | UIC_COMMAND_READY) == 0x0) { | 707 | UIC_COMMAND_READY) == 0x0) { |
870 | dev_err(&hba->pdev->dev, | 708 | dev_err(hba->dev, |
871 | "Controller not ready" | 709 | "Controller not ready" |
872 | " to accept UIC commands\n"); | 710 | " to accept UIC commands\n"); |
873 | return -EIO; | 711 | return -EIO; |
@@ -912,7 +750,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) | |||
912 | /* check if device present */ | 750 | /* check if device present */ |
913 | reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS)); | 751 | reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS)); |
914 | if (!ufshcd_is_device_present(reg)) { | 752 | if (!ufshcd_is_device_present(reg)) { |
915 | dev_err(&hba->pdev->dev, "cc: Device not present\n"); | 753 | dev_err(hba->dev, "cc: Device not present\n"); |
916 | err = -ENXIO; | 754 | err = -ENXIO; |
917 | goto out; | 755 | goto out; |
918 | } | 756 | } |
@@ -924,7 +762,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) | |||
924 | if (!(ufshcd_get_lists_status(reg))) { | 762 | if (!(ufshcd_get_lists_status(reg))) { |
925 | ufshcd_enable_run_stop_reg(hba); | 763 | ufshcd_enable_run_stop_reg(hba); |
926 | } else { | 764 | } else { |
927 | dev_err(&hba->pdev->dev, | 765 | dev_err(hba->dev, |
928 | "Host controller not ready to process requests"); | 766 | "Host controller not ready to process requests"); |
929 | err = -EIO; | 767 | err = -EIO; |
930 | goto out; | 768 | goto out; |
@@ -1005,7 +843,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) | |||
1005 | if (retry) { | 843 | if (retry) { |
1006 | retry--; | 844 | retry--; |
1007 | } else { | 845 | } else { |
1008 | dev_err(&hba->pdev->dev, | 846 | dev_err(hba->dev, |
1009 | "Controller enable failed\n"); | 847 | "Controller enable failed\n"); |
1010 | return -EIO; | 848 | return -EIO; |
1011 | } | 849 | } |
@@ -1084,7 +922,7 @@ static int ufshcd_do_reset(struct ufs_hba *hba) | |||
1084 | 922 | ||
1085 | /* start the initialization process */ | 923 | /* start the initialization process */ |
1086 | if (ufshcd_initialize_hba(hba)) { | 924 | if (ufshcd_initialize_hba(hba)) { |
1087 | dev_err(&hba->pdev->dev, | 925 | dev_err(hba->dev, |
1088 | "Reset: Controller initialization failed\n"); | 926 | "Reset: Controller initialization failed\n"); |
1089 | return FAILED; | 927 | return FAILED; |
1090 | } | 928 | } |
@@ -1167,7 +1005,7 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) | |||
1167 | task_result = SUCCESS; | 1005 | task_result = SUCCESS; |
1168 | } else { | 1006 | } else { |
1169 | task_result = FAILED; | 1007 | task_result = FAILED; |
1170 | dev_err(&hba->pdev->dev, | 1008 | dev_err(hba->dev, |
1171 | "trc: Invalid ocs = %x\n", ocs_value); | 1009 | "trc: Invalid ocs = %x\n", ocs_value); |
1172 | } | 1010 | } |
1173 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 1011 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
@@ -1281,7 +1119,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |||
1281 | /* check if the returned transfer response is valid */ | 1119 | /* check if the returned transfer response is valid */ |
1282 | result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr); | 1120 | result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr); |
1283 | if (result) { | 1121 | if (result) { |
1284 | dev_err(&hba->pdev->dev, | 1122 | dev_err(hba->dev, |
1285 | "Invalid response = %x\n", result); | 1123 | "Invalid response = %x\n", result); |
1286 | break; | 1124 | break; |
1287 | } | 1125 | } |
@@ -1310,7 +1148,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |||
1310 | case OCS_FATAL_ERROR: | 1148 | case OCS_FATAL_ERROR: |
1311 | default: | 1149 | default: |
1312 | result |= DID_ERROR << 16; | 1150 | result |= DID_ERROR << 16; |
1313 | dev_err(&hba->pdev->dev, | 1151 | dev_err(hba->dev, |
1314 | "OCS error from controller = %x\n", ocs); | 1152 | "OCS error from controller = %x\n", ocs); |
1315 | break; | 1153 | break; |
1316 | } /* end of switch */ | 1154 | } /* end of switch */ |
@@ -1374,7 +1212,7 @@ static void ufshcd_uic_cc_handler (struct work_struct *work) | |||
1374 | !(ufshcd_get_uic_cmd_result(hba))) { | 1212 | !(ufshcd_get_uic_cmd_result(hba))) { |
1375 | 1213 | ||
1376 | if (ufshcd_make_hba_operational(hba)) | 1214 | if (ufshcd_make_hba_operational(hba)) |
1377 | dev_err(&hba->pdev->dev, | 1215 | dev_err(hba->dev, |
1378 | "cc: hba not operational state\n"); | 1216 | "cc: hba not operational state\n"); |
1379 | return; | 1217 | return; |
1380 | } | 1218 | } |
@@ -1509,7 +1347,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba, | |||
1509 | free_slot = ufshcd_get_tm_free_slot(hba); | 1347 | free_slot = ufshcd_get_tm_free_slot(hba); |
1510 | if (free_slot >= hba->nutmrs) { | 1348 | if (free_slot >= hba->nutmrs) { |
1511 | spin_unlock_irqrestore(host->host_lock, flags); | 1349 | spin_unlock_irqrestore(host->host_lock, flags); |
1512 | dev_err(&hba->pdev->dev, "Task management queue full\n"); | 1350 | dev_err(hba->dev, "Task management queue full\n"); |
1513 | err = FAILED; | 1351 | err = FAILED; |
1514 | goto out; | 1352 | goto out; |
1515 | } | 1353 | } |
@@ -1552,7 +1390,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba, | |||
1552 | &hba->tm_condition) != 0), | 1390 | &hba->tm_condition) != 0), |
1553 | 60 * HZ); | 1391 | 60 * HZ); |
1554 | if (!err) { | 1392 | if (!err) { |
1555 | dev_err(&hba->pdev->dev, | 1393 | dev_err(hba->dev, |
1556 | "Task management command timed-out\n"); | 1394 | "Task management command timed-out\n"); |
1557 | err = FAILED; | 1395 | err = FAILED; |
1558 | goto out; | 1396 | goto out; |
@@ -1688,23 +1526,13 @@ static struct scsi_host_template ufshcd_driver_template = { | |||
1688 | }; | 1526 | }; |
1689 | 1527 | ||
1690 | /** | 1528 | /** |
1691 | * ufshcd_shutdown - main function to put the controller in reset state | ||
1692 | * @pdev: pointer to PCI device handle | ||
1693 | */ | ||
1694 | static void ufshcd_shutdown(struct pci_dev *pdev) | ||
1695 | { | ||
1696 | ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev)); | ||
1697 | } | ||
1698 | |||
1699 | #ifdef CONFIG_PM | ||
1700 | /** | ||
1701 | * ufshcd_suspend - suspend power management function | 1529 | * ufshcd_suspend - suspend power management function |
1702 | * @pdev: pointer to PCI device handle | 1530 | * @hba: per adapter instance |
1703 | * @state: power state | 1531 | * @state: power state |
1704 | * | 1532 | * |
1705 | * Returns -ENOSYS | 1533 | * Returns -ENOSYS |
1706 | */ | 1534 | */ |
1707 | static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state) | 1535 | int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state) |
1708 | { | 1536 | { |
1709 | /* | 1537 | /* |
1710 | * TODO: | 1538 | * TODO: |
@@ -1717,14 +1545,15 @@ static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state) | |||
1717 | 1545 | ||
1718 | return -ENOSYS; | 1546 | return -ENOSYS; |
1719 | } | 1547 | } |
1548 | EXPORT_SYMBOL_GPL(ufshcd_suspend); | ||
1720 | 1549 | ||
1721 | /** | 1550 | /** |
1722 | * ufshcd_resume - resume power management function | 1551 | * ufshcd_resume - resume power management function |
1723 | * @pdev: pointer to PCI device handle | 1552 | * @hba: per adapter instance |
1724 | * | 1553 | * |
1725 | * Returns -ENOSYS | 1554 | * Returns -ENOSYS |
1726 | */ | 1555 | */ |
1727 | static int ufshcd_resume(struct pci_dev *pdev) | 1556 | int ufshcd_resume(struct ufs_hba *hba) |
1728 | { | 1557 | { |
1729 | /* | 1558 | /* |
1730 | * TODO: | 1559 | * TODO: |
@@ -1737,7 +1566,7 @@ static int ufshcd_resume(struct pci_dev *pdev) | |||
1737 | 1566 | ||
1738 | return -ENOSYS; | 1567 | return -ENOSYS; |
1739 | } | 1568 | } |
1740 | #endif /* CONFIG_PM */ | 1569 | EXPORT_SYMBOL_GPL(ufshcd_resume); |
1741 | 1570 | ||
1742 | /** | 1571 | /** |
1743 | * ufshcd_hba_free - free allocated memory for | 1572 | * ufshcd_hba_free - free allocated memory for |
@@ -1748,107 +1577,67 @@ static void ufshcd_hba_free(struct ufs_hba *hba) | |||
1748 | { | 1577 | { |
1749 | iounmap(hba->mmio_base); | 1578 | iounmap(hba->mmio_base); |
1750 | ufshcd_free_hba_memory(hba); | 1579 | ufshcd_free_hba_memory(hba); |
1751 | pci_release_regions(hba->pdev); | ||
1752 | } | 1580 | } |
1753 | 1581 | ||
1754 | /** | 1582 | /** |
1755 | * ufshcd_remove - de-allocate PCI/SCSI host and host memory space | 1583 | * ufshcd_remove - de-allocate SCSI host and host memory space |
1756 | * data structure memory | 1584 | * data structures |
1757 | * @pdev - pointer to PCI handle | 1585 | * @hba: per adapter instance |
1758 | */ | 1586 | */ |
1759 | static void ufshcd_remove(struct pci_dev *pdev) | 1587 | void ufshcd_remove(struct ufs_hba *hba) |
1760 | { | 1588 | { |
1761 | struct ufs_hba *hba = pci_get_drvdata(pdev); | ||
1762 | |||
1763 | /* disable interrupts */ | 1589 | /* disable interrupts */ |
1764 | ufshcd_int_config(hba, UFSHCD_INT_DISABLE); | 1590 | ufshcd_int_config(hba, UFSHCD_INT_DISABLE); |
1765 | free_irq(pdev->irq, hba); | ||
1766 | 1591 | ||
1767 | ufshcd_hba_stop(hba); | 1592 | ufshcd_hba_stop(hba); |
1768 | ufshcd_hba_free(hba); | 1593 | ufshcd_hba_free(hba); |
1769 | 1594 | ||
1770 | scsi_remove_host(hba->host); | 1595 | scsi_remove_host(hba->host); |
1771 | scsi_host_put(hba->host); | 1596 | scsi_host_put(hba->host); |
1772 | pci_set_drvdata(pdev, NULL); | ||
1773 | pci_clear_master(pdev); | ||
1774 | pci_disable_device(pdev); | ||
1775 | } | ||
1776 | |||
1777 | /** | ||
1778 | * ufshcd_set_dma_mask - Set dma mask based on the controller | ||
1779 | * addressing capability | ||
1780 | * @pdev: PCI device structure | ||
1781 | * | ||
1782 | * Returns 0 for success, non-zero for failure | ||
1783 | */ | ||
1784 | static int ufshcd_set_dma_mask(struct ufs_hba *hba) | ||
1785 | { | ||
1786 | int err; | ||
1787 | u64 dma_mask; | ||
1788 | |||
1789 | /* | ||
1790 | * If controller supports 64 bit addressing mode, then set the DMA | ||
1791 | * mask to 64-bit, else set the DMA mask to 32-bit | ||
1792 | */ | ||
1793 | if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) | ||
1794 | dma_mask = DMA_BIT_MASK(64); | ||
1795 | else | ||
1796 | dma_mask = DMA_BIT_MASK(32); | ||
1797 | |||
1798 | err = pci_set_dma_mask(hba->pdev, dma_mask); | ||
1799 | if (err) | ||
1800 | return err; | ||
1801 | |||
1802 | err = pci_set_consistent_dma_mask(hba->pdev, dma_mask); | ||
1803 | |||
1804 | return err; | ||
1805 | } | 1597 | } |
1598 | EXPORT_SYMBOL_GPL(ufshcd_remove); | ||
1806 | 1599 | ||
1807 | /** | 1600 | /** |
1808 | * ufshcd_probe - probe routine of the driver | 1601 | * ufshcd_init - Driver initialization routine |
1809 | * @pdev: pointer to PCI device handle | 1602 | * @dev: pointer to device handle |
1810 | * @id: PCI device id | 1603 | * @hba_handle: driver private handle |
1811 | * | 1604 | * @mmio_base: base register address |
1605 | * @irq: Interrupt line of device | ||
1812 | * Returns 0 on success, non-zero value on failure | 1606 | * Returns 0 on success, non-zero value on failure |
1813 | */ | 1607 | */ |
1814 | static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 1608 | int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, |
1609 | void __iomem *mmio_base, unsigned int irq) | ||
1815 | { | 1610 | { |
1816 | struct Scsi_Host *host; | 1611 | struct Scsi_Host *host; |
1817 | struct ufs_hba *hba; | 1612 | struct ufs_hba *hba; |
1818 | int err; | 1613 | int err; |
1819 | 1614 | ||
1820 | err = pci_enable_device(pdev); | 1615 | if (!dev) { |
1821 | if (err) { | 1616 | dev_err(dev, |
1822 | dev_err(&pdev->dev, "pci_enable_device failed\n"); | 1617 | "Invalid memory reference: dev is NULL\n"); |
1618 | err = -ENODEV; | ||
1823 | goto out_error; | 1619 | goto out_error; |
1824 | } | 1620 | } |
1825 | 1621 | ||
1826 | pci_set_master(pdev); | 1622 | if (!mmio_base) { |
1623 | dev_err(dev, | ||
1624 | "Invalid memory reference for mmio_base is NULL\n"); | ||
1625 | err = -ENODEV; | ||
1626 | goto out_error; | ||
1627 | } | ||
1827 | 1628 | ||
1828 | host = scsi_host_alloc(&ufshcd_driver_template, | 1629 | host = scsi_host_alloc(&ufshcd_driver_template, |
1829 | sizeof(struct ufs_hba)); | 1630 | sizeof(struct ufs_hba)); |
1830 | if (!host) { | 1631 | if (!host) { |
1831 | dev_err(&pdev->dev, "scsi_host_alloc failed\n"); | 1632 | dev_err(dev, "scsi_host_alloc failed\n"); |
1832 | err = -ENOMEM; | 1633 | err = -ENOMEM; |
1833 | goto out_disable; | 1634 | goto out_error; |
1834 | } | 1635 | } |
1835 | hba = shost_priv(host); | 1636 | hba = shost_priv(host); |
1836 | |||
1837 | err = pci_request_regions(pdev, UFSHCD); | ||
1838 | if (err < 0) { | ||
1839 | dev_err(&pdev->dev, "request regions failed\n"); | ||
1840 | goto out_host_put; | ||
1841 | } | ||
1842 | |||
1843 | hba->mmio_base = pci_ioremap_bar(pdev, 0); | ||
1844 | if (!hba->mmio_base) { | ||
1845 | dev_err(&pdev->dev, "memory map failed\n"); | ||
1846 | err = -ENOMEM; | ||
1847 | goto out_release_regions; | ||
1848 | } | ||
1849 | |||
1850 | hba->host = host; | 1637 | hba->host = host; |
1851 | hba->pdev = pdev; | 1638 | hba->dev = dev; |
1639 | hba->mmio_base = mmio_base; | ||
1640 | hba->irq = irq; | ||
1852 | 1641 | ||
1853 | /* Read capabilities registers */ | 1642 | /* Read capabilities registers */ |
1854 | ufshcd_hba_capabilities(hba); | 1643 | ufshcd_hba_capabilities(hba); |
@@ -1856,17 +1645,11 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1856 | /* Get UFS version supported by the controller */ | 1645 | /* Get UFS version supported by the controller */ |
1857 | hba->ufs_version = ufshcd_get_ufs_version(hba); | 1646 | hba->ufs_version = ufshcd_get_ufs_version(hba); |
1858 | 1647 | ||
1859 | err = ufshcd_set_dma_mask(hba); | ||
1860 | if (err) { | ||
1861 | dev_err(&pdev->dev, "set dma mask failed\n"); | ||
1862 | goto out_iounmap; | ||
1863 | } | ||
1864 | |||
1865 | /* Allocate memory for host memory space */ | 1648 | /* Allocate memory for host memory space */ |
1866 | err = ufshcd_memory_alloc(hba); | 1649 | err = ufshcd_memory_alloc(hba); |
1867 | if (err) { | 1650 | if (err) { |
1868 | dev_err(&pdev->dev, "Memory allocation failed\n"); | 1651 | dev_err(hba->dev, "Memory allocation failed\n"); |
1869 | goto out_iounmap; | 1652 | goto out_disable; |
1870 | } | 1653 | } |
1871 | 1654 | ||
1872 | /* Configure LRB */ | 1655 | /* Configure LRB */ |
@@ -1888,76 +1671,50 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1888 | INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); | 1671 | INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); |
1889 | 1672 | ||
1890 | /* IRQ registration */ | 1673 | /* IRQ registration */ |
1891 | err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); | 1674 | err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); |
1892 | if (err) { | 1675 | if (err) { |
1893 | dev_err(&pdev->dev, "request irq failed\n"); | 1676 | dev_err(hba->dev, "request irq failed\n"); |
1894 | goto out_lrb_free; | 1677 | goto out_lrb_free; |
1895 | } | 1678 | } |
1896 | 1679 | ||
1897 | /* Enable SCSI tag mapping */ | 1680 | /* Enable SCSI tag mapping */ |
1898 | err = scsi_init_shared_tag_map(host, host->can_queue); | 1681 | err = scsi_init_shared_tag_map(host, host->can_queue); |
1899 | if (err) { | 1682 | if (err) { |
1900 | dev_err(&pdev->dev, "init shared queue failed\n"); | 1683 | dev_err(hba->dev, "init shared queue failed\n"); |
1901 | goto out_free_irq; | 1684 | goto out_free_irq; |
1902 | } | 1685 | } |
1903 | 1686 | ||
1904 | pci_set_drvdata(pdev, hba); | 1687 | err = scsi_add_host(host, hba->dev); |
1905 | |||
1906 | err = scsi_add_host(host, &pdev->dev); | ||
1907 | if (err) { | 1688 | if (err) { |
1908 | dev_err(&pdev->dev, "scsi_add_host failed\n"); | 1689 | dev_err(hba->dev, "scsi_add_host failed\n"); |
1909 | goto out_free_irq; | 1690 | goto out_free_irq; |
1910 | } | 1691 | } |
1911 | 1692 | ||
1912 | /* Initialization routine */ | 1693 | /* Initialization routine */ |
1913 | err = ufshcd_initialize_hba(hba); | 1694 | err = ufshcd_initialize_hba(hba); |
1914 | if (err) { | 1695 | if (err) { |
1915 | dev_err(&pdev->dev, "Initialization failed\n"); | 1696 | dev_err(hba->dev, "Initialization failed\n"); |
1916 | goto out_free_irq; | 1697 | goto out_remove_scsi_host; |
1917 | } | 1698 | } |
1699 | *hba_handle = hba; | ||
1918 | 1700 | ||
1919 | return 0; | 1701 | return 0; |
1920 | 1702 | ||
1703 | out_remove_scsi_host: | ||
1704 | scsi_remove_host(hba->host); | ||
1921 | out_free_irq: | 1705 | out_free_irq: |
1922 | free_irq(pdev->irq, hba); | 1706 | free_irq(irq, hba); |
1923 | out_lrb_free: | 1707 | out_lrb_free: |
1924 | ufshcd_free_hba_memory(hba); | 1708 | ufshcd_free_hba_memory(hba); |
1925 | out_iounmap: | ||
1926 | iounmap(hba->mmio_base); | ||
1927 | out_release_regions: | ||
1928 | pci_release_regions(pdev); | ||
1929 | out_host_put: | ||
1930 | scsi_host_put(host); | ||
1931 | out_disable: | 1709 | out_disable: |
1932 | pci_clear_master(pdev); | 1710 | scsi_host_put(host); |
1933 | pci_disable_device(pdev); | ||
1934 | out_error: | 1711 | out_error: |
1935 | return err; | 1712 | return err; |
1936 | } | 1713 | } |
1714 | EXPORT_SYMBOL_GPL(ufshcd_init); | ||
1937 | 1715 | ||
1938 | static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = { | 1716 | MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>"); |
1939 | { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 1717 | MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); |
1940 | { } /* terminate list */ | 1718 | MODULE_DESCRIPTION("Generic UFS host controller driver Core"); |
1941 | }; | ||
1942 | |||
1943 | MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl); | ||
1944 | |||
1945 | static struct pci_driver ufshcd_pci_driver = { | ||
1946 | .name = UFSHCD, | ||
1947 | .id_table = ufshcd_pci_tbl, | ||
1948 | .probe = ufshcd_probe, | ||
1949 | .remove = ufshcd_remove, | ||
1950 | .shutdown = ufshcd_shutdown, | ||
1951 | #ifdef CONFIG_PM | ||
1952 | .suspend = ufshcd_suspend, | ||
1953 | .resume = ufshcd_resume, | ||
1954 | #endif | ||
1955 | }; | ||
1956 | |||
1957 | module_pci_driver(ufshcd_pci_driver); | ||
1958 | |||
1959 | MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, " | ||
1960 | "Vinayak Holikatti <h.vinayak@samsung.com>"); | ||
1961 | MODULE_DESCRIPTION("Generic UFS host controller driver"); | ||
1962 | MODULE_LICENSE("GPL"); | 1719 | MODULE_LICENSE("GPL"); |
1963 | MODULE_VERSION(UFSHCD_DRIVER_VERSION); | 1720 | MODULE_VERSION(UFSHCD_DRIVER_VERSION); |
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h new file mode 100644 index 000000000000..6b99a42f5819 --- /dev/null +++ b/drivers/scsi/ufs/ufshcd.h | |||
@@ -0,0 +1,202 @@ | |||
1 | /* | ||
2 | * Universal Flash Storage Host controller driver | ||
3 | * | ||
4 | * This code is based on drivers/scsi/ufs/ufshcd.h | ||
5 | * Copyright (C) 2011-2013 Samsung India Software Operations | ||
6 | * | ||
7 | * Authors: | ||
8 | * Santosh Yaraganavi <santosh.sy@samsung.com> | ||
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version 2 | ||
14 | * of the License, or (at your option) any later version. | ||
15 | * See the COPYING file in the top-level directory or visit | ||
16 | * <http://www.gnu.org/licenses/gpl-2.0.html> | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and | ||
24 | * without warranty of any kind. You are solely responsible for | ||
25 | * determining the appropriateness of using and distributing | ||
26 | * the program and assume all risks associated with your exercise | ||
27 | * of rights with respect to the program, including but not limited | ||
28 | * to infringement of third party rights, the risks and costs of | ||
29 | * program errors, damage to or loss of data, programs or equipment, | ||
30 | * and unavailability or interruption of operations. Under no | ||
31 | * circumstances will the contributor of this Program be liable for | ||
32 | * any damages of any kind arising from your use or distribution of | ||
33 | * this program. | ||
34 | */ | ||
35 | |||
36 | #ifndef _UFSHCD_H | ||
37 | #define _UFSHCD_H | ||
38 | |||
39 | #include <linux/module.h> | ||
40 | #include <linux/kernel.h> | ||
41 | #include <linux/init.h> | ||
42 | #include <linux/interrupt.h> | ||
43 | #include <linux/io.h> | ||
44 | #include <linux/delay.h> | ||
45 | #include <linux/slab.h> | ||
46 | #include <linux/spinlock.h> | ||
47 | #include <linux/workqueue.h> | ||
48 | #include <linux/errno.h> | ||
49 | #include <linux/types.h> | ||
50 | #include <linux/wait.h> | ||
51 | #include <linux/bitops.h> | ||
52 | #include <linux/pm_runtime.h> | ||
53 | #include <linux/clk.h> | ||
54 | |||
55 | #include <asm/irq.h> | ||
56 | #include <asm/byteorder.h> | ||
57 | #include <scsi/scsi.h> | ||
58 | #include <scsi/scsi_cmnd.h> | ||
59 | #include <scsi/scsi_host.h> | ||
60 | #include <scsi/scsi_tcq.h> | ||
61 | #include <scsi/scsi_dbg.h> | ||
62 | #include <scsi/scsi_eh.h> | ||
63 | |||
64 | #include "ufs.h" | ||
65 | #include "ufshci.h" | ||
66 | |||
67 | #define UFSHCD "ufshcd" | ||
68 | #define UFSHCD_DRIVER_VERSION "0.2" | ||
69 | |||
70 | /** | ||
71 | * struct uic_command - UIC command structure | ||
72 | * @command: UIC command | ||
73 | * @argument1: UIC command argument 1 | ||
74 | * @argument2: UIC command argument 2 | ||
75 | * @argument3: UIC command argument 3 | ||
76 | * @cmd_active: Indicate if UIC command is outstanding | ||
77 | * @result: UIC command result | ||
78 | */ | ||
79 | struct uic_command { | ||
80 | u32 command; | ||
81 | u32 argument1; | ||
82 | u32 argument2; | ||
83 | u32 argument3; | ||
84 | int cmd_active; | ||
85 | int result; | ||
86 | }; | ||
87 | |||
88 | /** | ||
89 | * struct ufshcd_lrb - local reference block | ||
90 | * @utr_descriptor_ptr: UTRD address of the command | ||
91 | * @ucd_cmd_ptr: UCD address of the command | ||
92 | * @ucd_rsp_ptr: Response UPIU address for this command | ||
93 | * @ucd_prdt_ptr: PRDT address of the command | ||
94 | * @cmd: pointer to SCSI command | ||
95 | * @sense_buffer: pointer to sense buffer address of the SCSI command | ||
96 | * @sense_bufflen: Length of the sense buffer | ||
97 | * @scsi_status: SCSI status of the command | ||
98 | * @command_type: SCSI, UFS, Query. | ||
99 | * @task_tag: Task tag of the command | ||
100 | * @lun: LUN of the command | ||
101 | */ | ||
102 | struct ufshcd_lrb { | ||
103 | struct utp_transfer_req_desc *utr_descriptor_ptr; | ||
104 | struct utp_upiu_cmd *ucd_cmd_ptr; | ||
105 | struct utp_upiu_rsp *ucd_rsp_ptr; | ||
106 | struct ufshcd_sg_entry *ucd_prdt_ptr; | ||
107 | |||
108 | struct scsi_cmnd *cmd; | ||
109 | u8 *sense_buffer; | ||
110 | unsigned int sense_bufflen; | ||
111 | int scsi_status; | ||
112 | |||
113 | int command_type; | ||
114 | int task_tag; | ||
115 | unsigned int lun; | ||
116 | }; | ||
117 | |||
118 | |||
119 | /** | ||
120 | * struct ufs_hba - per adapter private structure | ||
121 | * @mmio_base: UFSHCI base register address | ||
122 | * @ucdl_base_addr: UFS Command Descriptor base address | ||
123 | * @utrdl_base_addr: UTP Transfer Request Descriptor base address | ||
124 | * @utmrdl_base_addr: UTP Task Management Descriptor base address | ||
125 | * @ucdl_dma_addr: UFS Command Descriptor DMA address | ||
126 | * @utrdl_dma_addr: UTRDL DMA address | ||
127 | * @utmrdl_dma_addr: UTMRDL DMA address | ||
128 | * @host: Scsi_Host instance of the driver | ||
129 | * @dev: device handle | ||
130 | * @lrb: local reference block | ||
131 | * @outstanding_tasks: Bits representing outstanding task requests | ||
132 | * @outstanding_reqs: Bits representing outstanding transfer requests | ||
133 | * @capabilities: UFS Controller Capabilities | ||
134 | * @nutrs: Transfer Request Queue depth supported by controller | ||
135 | * @nutmrs: Task Management Queue depth supported by controller | ||
136 | * @ufs_version: UFS Version to which controller complies | ||
137 | * @irq: Irq number of the controller | ||
138 | * @active_uic_cmd: handle of active UIC command | ||
139 | * @ufshcd_tm_wait_queue: wait queue for task management | ||
140 | * @tm_condition: condition variable for task management | ||
141 | * @ufshcd_state: UFSHCD states | ||
142 | * @int_enable_mask: Interrupt Mask Bits | ||
143 | * @uic_workq: Work queue for UIC completion handling | ||
144 | * @feh_workq: Work queue for fatal controller error handling | ||
145 | * @errors: HBA errors | ||
146 | */ | ||
147 | struct ufs_hba { | ||
148 | void __iomem *mmio_base; | ||
149 | |||
150 | /* Virtual memory reference */ | ||
151 | struct utp_transfer_cmd_desc *ucdl_base_addr; | ||
152 | struct utp_transfer_req_desc *utrdl_base_addr; | ||
153 | struct utp_task_req_desc *utmrdl_base_addr; | ||
154 | |||
155 | /* DMA memory reference */ | ||
156 | dma_addr_t ucdl_dma_addr; | ||
157 | dma_addr_t utrdl_dma_addr; | ||
158 | dma_addr_t utmrdl_dma_addr; | ||
159 | |||
160 | struct Scsi_Host *host; | ||
161 | struct device *dev; | ||
162 | |||
163 | struct ufshcd_lrb *lrb; | ||
164 | |||
165 | unsigned long outstanding_tasks; | ||
166 | unsigned long outstanding_reqs; | ||
167 | |||
168 | u32 capabilities; | ||
169 | int nutrs; | ||
170 | int nutmrs; | ||
171 | u32 ufs_version; | ||
172 | unsigned int irq; | ||
173 | |||
174 | struct uic_command active_uic_cmd; | ||
175 | wait_queue_head_t ufshcd_tm_wait_queue; | ||
176 | unsigned long tm_condition; | ||
177 | |||
178 | u32 ufshcd_state; | ||
179 | u32 int_enable_mask; | ||
180 | |||
181 | /* Work Queues */ | ||
182 | struct work_struct uic_workq; | ||
183 | struct work_struct feh_workq; | ||
184 | |||
185 | /* HBA Errors */ | ||
186 | u32 errors; | ||
187 | }; | ||
188 | |||
189 | int ufshcd_init(struct device *, struct ufs_hba **, void __iomem *, | ||
190 | unsigned int); | ||
191 | void ufshcd_remove(struct ufs_hba *); | ||
192 | |||
193 | /** | ||
194 | * ufshcd_hba_stop - Send controller to reset state | ||
195 | * @hba: per adapter instance | ||
196 | */ | ||
197 | static inline void ufshcd_hba_stop(struct ufs_hba *hba) | ||
198 | { | ||
199 | writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE)); | ||
200 | } | ||
201 | |||
202 | #endif /* End of Header */ | ||
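Because ufshcd_init() takes only a struct device, an ioremapped register base and an IRQ, glue code for buses other than PCI can reuse the core unchanged. The sketch below shows a hypothetical platform-bus probe; the ufs_pltfrm_* names are illustrative and not part of this patch:

#include <linux/platform_device.h>
#include "ufshcd.h"

static int ufs_pltfrm_probe(struct platform_device *pdev)
{
        struct ufs_hba *hba;
        struct resource *res;
        void __iomem *mmio_base;
        int irq, err;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        mmio_base = ioremap(res->start, resource_size(res));
        if (!mmio_base)
                return -ENOMEM;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                err = irq;
                goto out_iounmap;
        }

        /* Hand the mapped registers and IRQ to the bus-agnostic core */
        err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq);
        if (err)
                goto out_iounmap;

        platform_set_drvdata(pdev, hba);
        return 0;

out_iounmap:
        iounmap(mmio_base);
        return err;
}

The matching remove path would simply call ufshcd_remove(hba); the core's ufshcd_hba_free() already unmaps the registers, so only resources the glue itself claimed remain to be released.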
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 6e3510f71167..0c164847a3ef 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h | |||
@@ -2,45 +2,35 @@ | |||
2 | * Universal Flash Storage Host controller driver | 2 | * Universal Flash Storage Host controller driver |
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/ufs/ufshci.h | 4 | * This code is based on drivers/scsi/ufs/ufshci.h |
5 | * Copyright (C) 2011-2012 Samsung India Software Operations | 5 | * Copyright (C) 2011-2013 Samsung India Software Operations |
6 | * | 6 | * |
7 | * Santosh Yaraganavi <santosh.sy@samsung.com> | 7 | * Authors: |
8 | * Vinayak Holikatti <h.vinayak@samsung.com> | 8 | * Santosh Yaraganavi <santosh.sy@samsung.com> |
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version 2 | 13 | * as published by the Free Software Foundation; either version 2 |
13 | * of the License, or (at your option) any later version. | 14 | * of the License, or (at your option) any later version. |
15 | * See the COPYING file in the top-level directory or visit | ||
16 | * <http://www.gnu.org/licenses/gpl-2.0.html> | ||
14 | * | 17 | * |
15 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
18 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
19 | * | 22 | * |
20 | * NO WARRANTY | 23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and |
21 | * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | 24 | * without warranty of any kind. You are solely responsible for |
22 | * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | 25 | * determining the appropriateness of using and distributing |
23 | * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | 26 | * the program and assume all risks associated with your exercise |
24 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | 27 | * of rights with respect to the program, including but not limited |
25 | * solely responsible for determining the appropriateness of using and | 28 | * to infringement of third party rights, the risks and costs of |
26 | * distributing the Program and assumes all risks associated with its | 29 | * program errors, damage to or loss of data, programs or equipment, |
27 | * exercise of rights under this Agreement, including but not limited to | 30 | * and unavailability or interruption of operations. Under no |
28 | * the risks and costs of program errors, damage to or loss of data, | 31 | * circumstances will the contributor of this Program be liable for |
29 | * programs or equipment, and unavailability or interruption of operations. | 32 | * any damages of any kind arising from your use or distribution of |
30 | 33 | * this program. | |
31 | * DISCLAIMER OF LIABILITY | ||
32 | * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | ||
33 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
34 | * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | ||
35 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | ||
36 | * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
37 | * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | ||
38 | * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | ||
39 | |||
40 | * You should have received a copy of the GNU General Public License | ||
41 | * along with this program; if not, write to the Free Software | ||
42 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, | ||
43 | * USA. | ||
44 | */ | 34 | */ |
45 | 35 | ||
46 | #ifndef _UFSHCI_H | 36 | #ifndef _UFSHCI_H |
diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild index f2b94918994d..562ff9d591b8 100644 --- a/include/scsi/Kbuild +++ b/include/scsi/Kbuild | |||
@@ -1,4 +1 @@ | |||
1 | header-y += scsi_netlink.h | ||
2 | header-y += scsi_netlink_fc.h | ||
3 | header-y += scsi_bsg_fc.h | ||
4 | header-y += fc/ | header-y += fc/ | |
diff --git a/include/scsi/fc/Kbuild b/include/scsi/fc/Kbuild index 56603813c6cd..e69de29bb2d1 100644 --- a/include/scsi/fc/Kbuild +++ b/include/scsi/fc/Kbuild | |||
@@ -1,4 +0,0 @@ | |||
1 | header-y += fc_els.h | ||
2 | header-y += fc_fs.h | ||
3 | header-y += fc_gs.h | ||
4 | header-y += fc_ns.h | ||
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h index 604cb9bb3e76..7e2314870341 100644 --- a/include/scsi/fcoe_sysfs.h +++ b/include/scsi/fcoe_sysfs.h | |||
@@ -34,7 +34,8 @@ struct fcoe_sysfs_function_template { | |||
34 | void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *); | 34 | void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *); |
35 | void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *); | 35 | void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *); |
36 | void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *); | 36 | void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *); |
37 | void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *); | 37 | void (*set_fcoe_ctlr_mode)(struct fcoe_ctlr_device *); |
38 | int (*set_fcoe_ctlr_enabled)(struct fcoe_ctlr_device *); | ||
38 | void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *); | 39 | void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *); |
39 | void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *); | 40 | void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *); |
40 | }; | 41 | }; |
@@ -48,6 +49,12 @@ enum fip_conn_type { | |||
48 | FIP_CONN_TYPE_VN2VN, | 49 | FIP_CONN_TYPE_VN2VN, |
49 | }; | 50 | }; |
50 | 51 | ||
52 | enum ctlr_enabled_state { | ||
53 | FCOE_CTLR_ENABLED, | ||
54 | FCOE_CTLR_DISABLED, | ||
55 | FCOE_CTLR_UNUSED, | ||
56 | }; | ||
57 | |||
51 | struct fcoe_ctlr_device { | 58 | struct fcoe_ctlr_device { |
52 | u32 id; | 59 | u32 id; |
53 | 60 | ||
@@ -64,6 +71,8 @@ struct fcoe_ctlr_device { | |||
64 | int fcf_dev_loss_tmo; | 71 | int fcf_dev_loss_tmo; |
65 | enum fip_conn_type mode; | 72 | enum fip_conn_type mode; |
66 | 73 | ||
74 | enum ctlr_enabled_state enabled; | ||
75 | |||
67 | /* expected in host order for displaying */ | 76 | /* expected in host order for displaying */ |
68 | struct fcoe_fc_els_lesb lesb; | 77 | struct fcoe_fc_els_lesb lesb; |
69 | }; | 78 | }; |
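The new set_fcoe_ctlr_enabled hook, the ctlr_enabled_state enum and the enabled member give an LLD a single place to react when the controller's enabled state is changed through sysfs. A sketch of such a handler is shown below; the fcoe_example_* helpers are placeholders and the exact enable/disable actions are assumptions:

#include <scsi/fcoe_sysfs.h>

/* Placeholder LLD actions; a real driver would restart or quiesce FIP here */
static int fcoe_example_start(struct fcoe_ctlr_device *cdev)
{
        return 0;
}

static int fcoe_example_stop(struct fcoe_ctlr_device *cdev)
{
        return 0;
}

static int fcoe_example_set_ctlr_enabled(struct fcoe_ctlr_device *cdev)
{
        switch (cdev->enabled) {
        case FCOE_CTLR_ENABLED:
                return fcoe_example_start(cdev);
        case FCOE_CTLR_DISABLED:
                return fcoe_example_stop(cdev);
        case FCOE_CTLR_UNUSED:
        default:
                return -EOPNOTSUPP;
        }
}

Such a handler would then be plugged into the driver's fcoe_sysfs_function_template as .set_fcoe_ctlr_enabled.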
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 8742d853a3b8..4427393115ea 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h | |||
@@ -260,6 +260,9 @@ void __fcoe_get_lesb(struct fc_lport *lport, struct fc_els_lesb *fc_lesb, | |||
260 | struct net_device *netdev); | 260 | struct net_device *netdev); |
261 | void fcoe_wwn_to_str(u64 wwn, char *buf, int len); | 261 | void fcoe_wwn_to_str(u64 wwn, char *buf, int len); |
262 | int fcoe_validate_vport_create(struct fc_vport *vport); | 262 | int fcoe_validate_vport_create(struct fc_vport *vport); |
263 | int fcoe_link_speed_update(struct fc_lport *); | ||
264 | void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); | ||
265 | void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev); | ||
263 | 266 | ||
264 | /** | 267 | /** |
265 | * is_fip_mode() - returns true if FIP mode selected. | 268 | * is_fip_mode() - returns true if FIP mode selected. |
@@ -289,8 +292,11 @@ static inline bool is_fip_mode(struct fcoe_ctlr *fip) | |||
289 | * @attached: whether this transport is already attached | 292 | * @attached: whether this transport is already attached |
290 | * @list: list linkage to all attached transports | 293 | * @list: list linkage to all attached transports |
291 | * @match: handler to allow the transport driver to match up a given netdev | 294 | * @match: handler to allow the transport driver to match up a given netdev |
295 | * @alloc: handler to allocate per-instance FCoE structures | ||
296 | * (no discovery or login) | ||
292 | * @create: handler to sysfs entry of create for FCoE instances | 297 | * @create: handler to sysfs entry of create for FCoE instances |
293 | * @destroy: handler to sysfs entry of destroy for FCoE instances | 298 | * @destroy: handler to delete per-instance FCoE structures |
299 | * (frees all memory) | ||
294 | * @enable: handler to sysfs entry of enable for FCoE instances | 300 | * @enable: handler to sysfs entry of enable for FCoE instances |
295 | * @disable: handler to sysfs entry of disable for FCoE instances | 301 | * @disable: handler to sysfs entry of disable for FCoE instances |
296 | */ | 302 | */ |
@@ -299,6 +305,7 @@ struct fcoe_transport { | |||
299 | bool attached; | 305 | bool attached; |
300 | struct list_head list; | 306 | struct list_head list; |
301 | bool (*match) (struct net_device *device); | 307 | bool (*match) (struct net_device *device); |
308 | int (*alloc) (struct net_device *device); | ||
302 | int (*create) (struct net_device *device, enum fip_state fip_mode); | 309 | int (*create) (struct net_device *device, enum fip_state fip_mode); |
303 | int (*destroy) (struct net_device *device); | 310 | int (*destroy) (struct net_device *device); |
304 | int (*enable) (struct net_device *device); | 311 | int (*enable) (struct net_device *device); |
@@ -347,7 +354,20 @@ struct fcoe_port { | |||
347 | struct timer_list timer; | 354 | struct timer_list timer; |
348 | struct work_struct destroy_work; | 355 | struct work_struct destroy_work; |
349 | u8 data_src_addr[ETH_ALEN]; | 356 | u8 data_src_addr[ETH_ALEN]; |
357 | struct net_device * (*get_netdev)(const struct fc_lport *lport); | ||
350 | }; | 358 | }; |
359 | |||
360 | /** | ||
361 | * fcoe_get_netdev() - Return the net device associated with a local port | ||
362 | * @lport: The local port to get the net device from | ||
363 | */ | ||
364 | static inline struct net_device *fcoe_get_netdev(const struct fc_lport *lport) | ||
365 | { | ||
366 | struct fcoe_port *port = ((struct fcoe_port *)lport_priv(lport)); | ||
367 | |||
368 | return (port->get_netdev) ? port->get_netdev(lport) : NULL; | ||
369 | } | ||
370 | |||
351 | void fcoe_clean_pending_queue(struct fc_lport *); | 371 | void fcoe_clean_pending_queue(struct fc_lport *); |
352 | void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb); | 372 | void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb); |
353 | void fcoe_queue_timer(ulong lport); | 373 | void fcoe_queue_timer(ulong lport); |
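The new get_netdev hook in struct fcoe_port lets the fcoe_get_netdev() inline above resolve a net_device from any fc_lport without reaching into transport-private data. A small usage sketch, with example_log_mtu() being a made-up caller:

    /*
     * Illustrative sketch only: once the LLD has set port->get_netdev,
     * any code holding an fc_lport can reach the underlying net_device.
     */
    #include <scsi/libfcoe.h>

    static void example_log_mtu(struct fc_lport *lport)
    {
            struct net_device *netdev = fcoe_get_netdev(lport);

            if (netdev)
                    pr_info("%s: MTU is %u\n", netdev->name, netdev->mtu);
    }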
@@ -356,7 +376,7 @@ int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen, | |||
356 | 376 | ||
357 | /* FCoE Sysfs helpers */ | 377 | /* FCoE Sysfs helpers */ |
358 | void fcoe_fcf_get_selected(struct fcoe_fcf_device *); | 378 | void fcoe_fcf_get_selected(struct fcoe_fcf_device *); |
359 | void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *); | 379 | void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *); |
360 | 380 | ||
361 | /** | 381 | /** |
362 | * struct netdev_list | 382 | * struct netdev_list |
@@ -372,4 +392,12 @@ struct fcoe_netdev_mapping { | |||
372 | int fcoe_transport_attach(struct fcoe_transport *ft); | 392 | int fcoe_transport_attach(struct fcoe_transport *ft); |
373 | int fcoe_transport_detach(struct fcoe_transport *ft); | 393 | int fcoe_transport_detach(struct fcoe_transport *ft); |
374 | 394 | ||
395 | /* sysfs store handlers for the ctlr_create/ctlr_destroy interfaces */ | ||
396 | ssize_t fcoe_ctlr_create_store(struct bus_type *bus, | ||
397 | const char *buf, size_t count); | ||
398 | ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus, | ||
399 | const char *buf, size_t count); | ||
400 | |||
375 | #endif /* _LIBFCOE_H */ | 401 | #endif /* _LIBFCOE_H */ |
402 | |||
403 | |||
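These two store handlers take a bus_type rather than a device because they implement the bus-level ctlr_create/ctlr_destroy files, which must work before any ctlr_X device exists. A rough sketch of how they could be bound to bus attributes follows; the attribute array and names are illustrative assumptions, and the real hookup lives in the fcoe_sysfs code.

    /*
     * Illustrative sketch only: exposing the bus-level store handlers as
     * write-only attributes on the fcoe bus.
     */
    #include <linux/device.h>
    #include <scsi/libfcoe.h>

    static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store);
    static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store);

    static struct bus_attribute *example_fcoe_bus_attrs[] = {
            &bus_attr_ctlr_create,
            &bus_attr_ctlr_destroy,
            NULL,
    };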
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 49084807eb6b..2b6956e9853d 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -873,7 +873,7 @@ static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsign | |||
873 | SHOST_DIF_TYPE2_PROTECTION, | 873 | SHOST_DIF_TYPE2_PROTECTION, |
874 | SHOST_DIF_TYPE3_PROTECTION }; | 874 | SHOST_DIF_TYPE3_PROTECTION }; |
875 | 875 | ||
876 | if (target_type > SHOST_DIF_TYPE3_PROTECTION) | 876 | if (target_type >= ARRAY_SIZE(cap)) |
877 | return 0; | 877 | return 0; |
878 | 878 | ||
879 | return shost->prot_capabilities & cap[target_type] ? target_type : 0; | 879 | return shost->prot_capabilities & cap[target_type] ? target_type : 0; |
@@ -887,7 +887,7 @@ static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsign | |||
887 | SHOST_DIX_TYPE2_PROTECTION, | 887 | SHOST_DIX_TYPE2_PROTECTION, |
888 | SHOST_DIX_TYPE3_PROTECTION }; | 888 | SHOST_DIX_TYPE3_PROTECTION }; |
889 | 889 | ||
890 | if (target_type > SHOST_DIX_TYPE3_PROTECTION) | 890 | if (target_type >= ARRAY_SIZE(cap)) |
891 | return 0; | 891 | return 0; |
892 | 892 | ||
893 | return shost->prot_capabilities & cap[target_type]; | 893 | return shost->prot_capabilities & cap[target_type]; |
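Both scsi_host.h hunks replace a hard-coded enum upper bound with ARRAY_SIZE(cap), so the range check is derived from the lookup table it guards and cannot drift if another protection type is ever added. A sketch of the resulting DIF helper, reconstructed around the hunk with the elided leading array lines and the element type assumed:

    /*
     * Reconstruction for illustration: the bound now tracks the table
     * itself instead of a specific enum value.
     */
    static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost,
                                                     unsigned int target_type)
    {
            static unsigned int cap[] = { 0,
                                          SHOST_DIF_TYPE1_PROTECTION,
                                          SHOST_DIF_TYPE2_PROTECTION,
                                          SHOST_DIF_TYPE3_PROTECTION };

            if (target_type >= ARRAY_SIZE(cap))  /* was: > SHOST_DIF_TYPE3_PROTECTION */
                    return 0;

            return shost->prot_capabilities & cap[target_type] ? target_type : 0;
    }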
diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild index 29a87dd26cfb..75746d52f208 100644 --- a/include/uapi/scsi/Kbuild +++ b/include/uapi/scsi/Kbuild | |||
@@ -1,2 +1,5 @@ | |||
1 | # UAPI Header export list | 1 | # UAPI Header export list |
2 | header-y += fc/ | 2 | header-y += fc/ |
3 | header-y += scsi_bsg_fc.h | ||
4 | header-y += scsi_netlink.h | ||
5 | header-y += scsi_netlink_fc.h | ||
diff --git a/include/uapi/scsi/fc/Kbuild b/include/uapi/scsi/fc/Kbuild index aafaa5aa54d4..5ead9fac265c 100644 --- a/include/uapi/scsi/fc/Kbuild +++ b/include/uapi/scsi/fc/Kbuild | |||
@@ -1 +1,5 @@ | |||
1 | # UAPI Header export list | 1 | # UAPI Header export list |
2 | header-y += fc_els.h | ||
3 | header-y += fc_fs.h | ||
4 | header-y += fc_gs.h | ||
5 | header-y += fc_ns.h | ||
diff --git a/include/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h index 481abbd48e39..481abbd48e39 100644 --- a/include/scsi/fc/fc_els.h +++ b/include/uapi/scsi/fc/fc_els.h | |||
diff --git a/include/scsi/fc/fc_fs.h b/include/uapi/scsi/fc/fc_fs.h index 50f28b143451..50f28b143451 100644 --- a/include/scsi/fc/fc_fs.h +++ b/include/uapi/scsi/fc/fc_fs.h | |||
diff --git a/include/scsi/fc/fc_gs.h b/include/uapi/scsi/fc/fc_gs.h index a37346d47eb1..a37346d47eb1 100644 --- a/include/scsi/fc/fc_gs.h +++ b/include/uapi/scsi/fc/fc_gs.h | |||
diff --git a/include/scsi/fc/fc_ns.h b/include/uapi/scsi/fc/fc_ns.h index f7751d53f1d3..f7751d53f1d3 100644 --- a/include/scsi/fc/fc_ns.h +++ b/include/uapi/scsi/fc/fc_ns.h | |||
diff --git a/include/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h index 3031b900b087..3031b900b087 100644 --- a/include/scsi/scsi_bsg_fc.h +++ b/include/uapi/scsi/scsi_bsg_fc.h | |||
diff --git a/include/scsi/scsi_netlink.h b/include/uapi/scsi/scsi_netlink.h index 62b4edab15d3..62b4edab15d3 100644 --- a/include/scsi/scsi_netlink.h +++ b/include/uapi/scsi/scsi_netlink.h | |||
diff --git a/include/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h index cbf76e479761..cbf76e479761 100644 --- a/include/scsi/scsi_netlink_fc.h +++ b/include/uapi/scsi/scsi_netlink_fc.h | |||