author      Brian King <brking@linux.vnet.ibm.com>                   2009-06-08 17:19:08 -0400
committer   James Bottomley <James.Bottomley@HansenPartnership.com>  2009-06-08 19:05:21 -0400
commit      126c5cc37e682e7c5ae96754994b1cb50c2d0cb5 (patch)
tree        054781df1e5f7fb4d34eaed7e5674eff92cf63c1 /drivers/scsi/ibmvscsi/ibmvscsi.c
parent      c1988e3123751fd425fbae99d5c1776608e965a9 (diff)
[SCSI] ibmvscsi: Add support for capabilities MAD
Add support to ibmvscsi for the capabilities MAD. This command is sent
to the Virtual I/O Server prior to login in order to communicate the
client's capabilities. It also returns information about the capabilities
the server supports. The two main capabilities communicated in this MAD
relate to partition migration and client reserve. Client reserve allows
SCSI-2 reservations to be sent to virtual disks that are backed by
physical LUNs; the reservation is then passed through to the physical LUN.
Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvscsi.c')
-rw-r--r--    drivers/scsi/ibmvscsi/ibmvscsi.c    224
1 files changed, 197 insertions, 27 deletions
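For orientation before reading the diff, the sketch below mimics, in plain user-space C, the negotiation pattern this patch adds: the client builds a capability list (migration always, reservation only when the new client_reserve module parameter is set), hands it to the server, and then checks each entry's server_support field in the reply. The struct layouts, constant values, and the fake_server_reply() helper are simplified assumptions for illustration only; the driver's real definitions live in its viosrp.h header.

/*
 * Illustrative mock of the capabilities negotiation -- NOT the driver's API.
 * Struct layouts and constant values are simplified assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SERVER_SUPPORTS_CAP 1                   /* assumed value */

enum cap_type {
        MIGRATION_CAPABILITIES = 1,             /* assumed values */
        RESERVATION_CAPABILITIES = 2,
};

struct cap_common {                             /* stand-in for the common capability header */
        uint32_t cap_type;
        uint16_t length;
        uint8_t server_support;
};

struct cap_list {                               /* stand-in for the capabilities buffer */
        uint32_t flags;
        char name[64];
        char loc[24];
        struct cap_common migration;
        struct cap_common reserve;
};

/* Client side: request every capability it wants to use. */
static void build_caps(struct cap_list *caps, int want_client_reserve)
{
        memset(caps, 0, sizeof(*caps));
        strncpy(caps->name, "host0", sizeof(caps->name) - 1);
        strncpy(caps->loc, "example-loc-code", sizeof(caps->loc) - 1);

        caps->migration.cap_type = MIGRATION_CAPABILITIES;
        caps->migration.length = sizeof(caps->migration);
        caps->migration.server_support = SERVER_SUPPORTS_CAP;

        if (want_client_reserve) {
                caps->reserve.cap_type = RESERVATION_CAPABILITIES;
                caps->reserve.length = sizeof(caps->reserve);
                caps->reserve.server_support = SERVER_SUPPORTS_CAP;
        }
}

/* Pretend server: grants migration, rejects client reserve. */
static void fake_server_reply(struct cap_list *caps)
{
        caps->reserve.server_support = 0;
}

int main(void)
{
        struct cap_list caps;

        build_caps(&caps, 1);
        fake_server_reply(&caps);

        /* Response handling mirrors capabilities_rsp() in the patch below. */
        if (caps.migration.server_support != SERVER_SUPPORTS_CAP)
                printf("Partition migration not supported\n");
        if (caps.reserve.server_support == SERVER_SUPPORTS_CAP)
                printf("Client reserve enabled\n");
        else
                printf("Client reserve not supported\n");
        return 0;
}

Loading the real module with client_reserve=0 corresponds to calling build_caps() with want_client_reserve set to 0: the reservation entry is simply left out of the request, which is what the else branch of send_mad_capabilities() below does.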
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 822fbc32a2a..11d2602ae88 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -70,6 +70,7 @@
 #include <linux/moduleparam.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 #include <asm/firmware.h>
 #include <asm/vio.h>
 #include <asm/firmware.h>
@@ -95,6 +96,7 @@ static int reset_timeout = 60;
 static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 static int fast_fail = 1;
+static int client_reserve = 1;

 static struct scsi_transport_template *ibmvscsi_transport_template;

@@ -117,6 +119,8 @@ module_param_named(max_requests, max_requests, int, S_IRUGO);
 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
 module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
+module_param_named(client_reserve, client_reserve, int, S_IRUGO );
+MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");

 /* ------------------------------------------------------------
  * Routines for the event pool and event structs
@@ -790,6 +794,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
  */

 /**
+ * map_persist_bufs: - Pre-map persistent data for adapter logins
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
+ * Return 1 on error, 0 on success.
+ */
+static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
+{
+
+        hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
+                                             sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+
+        if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
+                dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
+                return 1;
+        }
+
+        hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
+                                                     &hostdata->madapter_info,
+                                                     sizeof(hostdata->madapter_info),
+                                                     DMA_BIDIRECTIONAL);
+        if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
+                dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
+                dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+                                 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+                return 1;
+        }
+
+        return 0;
+}
+
+/**
+ * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Unmap the capabilities and adapter info DMA buffers
+ */
+static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
+{
+        dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+                         sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+
+        dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
+                         sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
+}
+
+/**
  * login_rsp: - Handle response to SRP login request
  * @evt_struct: srp_event_struct with the response
  *
@@ -817,6 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
         }

         dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
+        hostdata->client_migrated = 0;

         /* Now we know what the real request-limit is.
          * This value is set rather than added to request_limit because
@@ -866,6 +918,93 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 };

 /**
+ * capabilities_rsp: - Handle response to MAD adapter capabilities request
+ * @evt_struct: srp_event_struct with the response
+ *
+ * Used as a "done" callback by when sending adapter_info.
+ */
+static void capabilities_rsp(struct srp_event_struct *evt_struct)
+{
+        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+        if (evt_struct->xfer_iu->mad.capabilities.common.status) {
+                dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
+                        evt_struct->xfer_iu->mad.capabilities.common.status);
+        } else {
+                if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+                        dev_info(hostdata->dev, "Partition migration not supported\n");
+
+                if (client_reserve) {
+                        if (hostdata->caps.reserve.common.server_support ==
+                            SERVER_SUPPORTS_CAP)
+                                dev_info(hostdata->dev, "Client reserve enabled\n");
+                        else
+                                dev_info(hostdata->dev, "Client reserve not supported\n");
+                }
+        }
+
+        send_srp_login(hostdata);
+}
+
+/**
+ * send_mad_capabilities: - Sends the mad capabilities request
+ *      and stores the result so it can be retrieved with
+ * @hostdata: ibmvscsi_host_data of host
+ */
+static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
+{
+        struct viosrp_capabilities *req;
+        struct srp_event_struct *evt_struct;
+        unsigned long flags;
+        struct device_node *of_node = hostdata->dev->archdata.of_node;
+        const char *location;
+
+        evt_struct = get_event_struct(&hostdata->pool);
+        BUG_ON(!evt_struct);
+
+        init_event_struct(evt_struct, capabilities_rsp,
+                          VIOSRP_MAD_FORMAT, info_timeout);
+
+        req = &evt_struct->iu.mad.capabilities;
+        memset(req, 0, sizeof(*req));
+
+        hostdata->caps.flags = CAP_LIST_SUPPORTED;
+        if (hostdata->client_migrated)
+                hostdata->caps.flags |= CLIENT_MIGRATED;
+
+        strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
+                sizeof(hostdata->caps.name));
+        hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
+
+        location = of_get_property(of_node, "ibm,loc-code", NULL);
+        location = location ? location : dev_name(hostdata->dev);
+        strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
+        hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
+
+        req->common.type = VIOSRP_CAPABILITIES_TYPE;
+        req->buffer = hostdata->caps_addr;
+
+        hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
+        hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
+        hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
+        hostdata->caps.migration.ecl = 1;
+
+        if (client_reserve) {
+                hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
+                hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
+                hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
+                hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
+                req->common.length = sizeof(hostdata->caps);
+        } else
+                req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+
+        spin_lock_irqsave(hostdata->host->host_lock, flags);
+        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+                dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
+        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
  * fast_fail_rsp: - Handle response to MAD enable fast fail
  * @evt_struct: srp_event_struct with the response
  *
@@ -884,7 +1023,7 @@ static void fast_fail_rsp(struct srp_event_struct *evt_struct)
         else if (status != VIOSRP_MAD_SUCCESS)
                 dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);

-        send_srp_login(hostdata);
+        send_mad_capabilities(hostdata);
 }

 /**
@@ -900,8 +1039,10 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
         struct viosrp_fast_fail *fast_fail_mad;
         struct srp_event_struct *evt_struct;

-        if (!fast_fail)
-                return send_srp_login(hostdata);
+        if (!fast_fail) {
+                send_mad_capabilities(hostdata);
+                return 0;
+        }

         evt_struct = get_event_struct(&hostdata->pool);
         BUG_ON(!evt_struct);
@@ -929,10 +1070,6 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
 static void adapter_info_rsp(struct srp_event_struct *evt_struct)
 {
         struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-        dma_unmap_single(hostdata->dev,
-                         evt_struct->iu.mad.adapter_info.buffer,
-                         evt_struct->iu.mad.adapter_info.common.length,
-                         DMA_BIDIRECTIONAL);

         if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
                 dev_err(hostdata->dev, "error %d getting adapter info\n",
@@ -977,7 +1114,6 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
         struct viosrp_adapter_info *req;
         struct srp_event_struct *evt_struct;
         unsigned long flags;
-        dma_addr_t addr;

         evt_struct = get_event_struct(&hostdata->pool);
         BUG_ON(!evt_struct);
@@ -992,28 +1128,11 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)

         req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
         req->common.length = sizeof(hostdata->madapter_info);
-        req->buffer = addr = dma_map_single(hostdata->dev,
-                                            &hostdata->madapter_info,
-                                            sizeof(hostdata->madapter_info),
-                                            DMA_BIDIRECTIONAL);
+        req->buffer = hostdata->adapter_info_addr;

-        if (dma_mapping_error(hostdata->dev, req->buffer)) {
-                if (!firmware_has_feature(FW_FEATURE_CMO))
-                        dev_err(hostdata->dev,
-                                "Unable to map request_buffer for "
-                                "adapter_info!\n");
-                free_event_struct(&hostdata->pool, evt_struct);
-                return;
-        }
-
         spin_lock_irqsave(hostdata->host->host_lock, flags);
-        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) {
+        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
                 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
-                dma_unmap_single(hostdata->dev,
-                                 addr,
-                                 sizeof(hostdata->madapter_info),
-                                 DMA_BIDIRECTIONAL);
-        }
         spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 };

@@ -1361,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                 if (crq->format == 0x06) {
                         /* We need to re-setup the interpartition connection */
                         dev_info(hostdata->dev, "Re-enabling adapter!\n");
+                        hostdata->client_migrated = 1;
                         purge_requests(hostdata, DID_REQUEUE);
                         if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
                                                               hostdata)) ||
@@ -1529,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
 /* ------------------------------------------------------------
  * sysfs attributes
  */
+static ssize_t show_host_vhost_loc(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+        struct Scsi_Host *shost = class_to_shost(dev);
+        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+        int len;
+
+        len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
+                       hostdata->caps.loc);
+        return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_loc = {
+        .attr = {
+                 .name = "vhost_loc",
+                 .mode = S_IRUGO,
+                 },
+        .show = show_host_vhost_loc,
+};
+
+static ssize_t show_host_vhost_name(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+        struct Scsi_Host *shost = class_to_shost(dev);
+        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+        int len;
+
+        len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
+                       hostdata->caps.name);
+        return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_name = {
+        .attr = {
+                 .name = "vhost_name",
+                 .mode = S_IRUGO,
+                 },
+        .show = show_host_vhost_name,
+};
+
 static ssize_t show_host_srp_version(struct device *dev,
                                      struct device_attribute *attr, char *buf)
 {
@@ -1652,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = {
 };

 static struct device_attribute *ibmvscsi_attrs[] = {
+        &ibmvscsi_host_vhost_loc,
+        &ibmvscsi_host_vhost_name,
         &ibmvscsi_host_srp_version,
         &ibmvscsi_host_partition_name,
         &ibmvscsi_host_partition_number,
@@ -1732,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
         atomic_set(&hostdata->request_limit, -1);
         hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;

+        if (map_persist_bufs(hostdata)) {
+                dev_err(&vdev->dev, "couldn't map persistent buffers\n");
+                goto persist_bufs_failed;
+        }
+
         rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
         if (rc != 0 && rc != H_RESOURCE) {
                 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
@@ -1792,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
       init_pool_failed:
         ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
       init_crq_failed:
+        unmap_persist_bufs(hostdata);
+      persist_bufs_failed:
         scsi_host_put(host);
       scsi_host_alloc_failed:
         return -1;
@@ -1800,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
         struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
+        unmap_persist_bufs(hostdata);
         release_event_pool(&hostdata->pool, hostdata);
         ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
                                         max_events);
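The patch also exposes the negotiated host name and location code through two new read-only sysfs attributes on the SCSI host, vhost_name and vhost_loc. Below is a minimal sketch of reading them from user space; the host0 path is a placeholder and should be replaced with the actual ibmvscsi host number on the system.

/* Sketch: read the vhost_name / vhost_loc attributes added by this patch.
 * "host0" is a placeholder; substitute the ibmvscsi host on your system. */
#include <stdio.h>

static void show_attr(const char *path)
{
        char line[64];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return;
        }
        if (fgets(line, sizeof(line), f))
                printf("%s: %s", path, line);   /* attribute value already ends in '\n' */
        fclose(f);
}

int main(void)
{
        show_attr("/sys/class/scsi_host/host0/vhost_name");
        show_attr("/sys/class/scsi_host/host0/vhost_loc");
        return 0;
}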