author		David Woodhouse <dwmw2@infradead.org>	2007-09-21 18:29:36 -0400
committer	James Bottomley <jejb@mulgrave.localdomain>	2007-10-12 14:49:26 -0400
commit		d3849d512fb0ca1e369e3efcaec910a949f55f62 (patch)
tree		52b79dd6cd4fe8835ad46b35083edf803c5180b5 /drivers
parent		5307b1e8b050f309901acc9c6121061206a70ba5 (diff)
[SCSI] Fix ibmvscsi client for multiplatform iSeries+pSeries kernel
If you build a multiplatform kernel for iSeries and pSeries, with
ibmvscsic support, the resulting client doesn't work on iSeries.
This fixes that by using the appropriate low-level operations
for the machine detected at runtime.
[jejb: fixed up rejections around the srp transport patch]
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Acked-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
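In outline, the fix replaces direct calls into the platform-specific CRQ code
with an ops table selected once at module init. A minimal sketch of the
dispatch pattern, condensed from the patch below rather than quoted verbatim:

	/* Sketch only: condensed from the patch below, not a verbatim excerpt. */
	struct ibmvscsi_ops {
		int (*init_crq_queue)(struct crq_queue *queue,
				      struct ibmvscsi_host_data *hostdata,
				      int max_requests);
		int (*send_crq)(struct ibmvscsi_host_data *hostdata,
				u64 word1, u64 word2);
		/* ...plus release, reset and reenable hooks, as in ibmvscsi.h below... */
	};

	static struct ibmvscsi_ops *ibmvscsi_ops;

	/* In ibmvscsi_module_init(): pick the backend for the machine we booted on. */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		ibmvscsi_ops = &iseriesvscsi_ops;	/* iSeries HvLpEvent/viopath backend */
	else if (firmware_has_feature(FW_FEATURE_VIO))
		ibmvscsi_ops = &rpavscsi_ops;		/* pSeries RPA hcall backend */
	else
		return -ENODEV;

Callers then go through ibmvscsi_ops->send_crq() and friends instead of the
old ibmvscsi_* entry points, so a single ibmvscsic module serves both platforms.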
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/ibmvscsi/Makefile        |   2
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvscsi.c      |  46
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvscsi.h      |  32
-rw-r--r--	drivers/scsi/ibmvscsi/iseries_vscsi.c |  37
-rw-r--r--	drivers/scsi/ibmvscsi/rpa_vscsi.c     | 113
5 files changed, 130 insertions(+), 100 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index f67d9efc7a99..6ac0633d5452 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -1,9 +1,7 @@
 obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o
 
 ibmvscsic-y += ibmvscsi.o
-ifndef CONFIG_PPC_PSERIES
 ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
-endif
 ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
 
 obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 93bd01b1e4b5..cda0cc3d182f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -70,6 +70,7 @@
 #include <linux/moduleparam.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <asm/firmware.h>
 #include <asm/vio.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -92,6 +93,8 @@ static struct scsi_transport_template *ibmvscsi_transport_template;
 
 #define IBMVSCSI_VERSION "1.5.8"
 
+static struct ibmvscsi_ops *ibmvscsi_ops;
+
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
 MODULE_LICENSE("GPL");
@@ -509,8 +512,8 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
 	atomic_set(&hostdata->request_limit, 0);
 
 	purge_requests(hostdata, DID_ERROR);
-	if ((ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata)) ||
-	    (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0)) ||
+	if ((ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata)) ||
+	    (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0)) ||
 	    (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
 		atomic_set(&hostdata->request_limit, -1);
 		dev_err(hostdata->dev, "error after reset\n");
@@ -615,7 +618,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	}
 
 	if ((rc =
-	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+	     ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
 		list_del(&evt_struct->list);
 		del_timer(&evt_struct->timer);
 
@@ -1214,8 +1217,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	case 0x01:	/* Initialization message */
 		dev_info(hostdata->dev, "partner initialized\n");
 		/* Send back a response */
-		if ((rc = ibmvscsi_send_crq(hostdata,
-					    0xC002000000000000LL, 0)) == 0) {
+		if ((rc = ibmvscsi_ops->send_crq(hostdata,
+						 0xC002000000000000LL, 0)) == 0) {
 			/* Now login */
 			send_srp_login(hostdata);
 		} else {
@@ -1240,10 +1243,10 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 			/* We need to re-setup the interpartition connection */
 			dev_info(hostdata->dev, "Re-enabling adapter!\n");
 			purge_requests(hostdata, DID_REQUEUE);
-			if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
-							 hostdata)) ||
-			    (ibmvscsi_send_crq(hostdata,
-					       0xC001000000000000LL, 0))) {
+			if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
+							      hostdata)) ||
+			    (ibmvscsi_ops->send_crq(hostdata,
+						    0xC001000000000000LL, 0))) {
 				atomic_set(&hostdata->request_limit,
 					   -1);
 				dev_err(hostdata->dev, "error after enable\n");
@@ -1253,10 +1256,10 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 				 crq->format);
 
 			purge_requests(hostdata, DID_ERROR);
-			if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
-						      hostdata)) ||
-			    (ibmvscsi_send_crq(hostdata,
-					       0xC001000000000000LL, 0))) {
+			if ((ibmvscsi_ops->reset_crq_queue(&hostdata->queue,
+							   hostdata)) ||
+			    (ibmvscsi_ops->send_crq(hostdata,
+						    0xC001000000000000LL, 0))) {
 				atomic_set(&hostdata->request_limit,
 					   -1);
 				dev_err(hostdata->dev, "error after reset\n");
@@ -1579,7 +1582,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	atomic_set(&hostdata->request_limit, -1);
 	hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
 
-	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
+	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_requests);
 	if (rc != 0 && rc != H_RESOURCE) {
 		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
 		goto init_crq_failed;
@@ -1608,7 +1611,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	 * to fail if the other end is not acive. In that case we don't
 	 * want to scan
 	 */
-	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+	if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
 	    || rc == H_RESOURCE) {
 		/*
 		 * Wait around max init_timeout secs for the adapter to finish
@@ -1636,7 +1639,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
       add_host_failed:
 	release_event_pool(&hostdata->pool, hostdata);
       init_pool_failed:
-	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
+	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_requests);
       init_crq_failed:
 	scsi_host_put(host);
       scsi_host_alloc_failed:
@@ -1647,8 +1650,8 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
 {
 	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
 	release_event_pool(&hostdata->pool, hostdata);
-	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
-				   max_requests);
+	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
+					max_requests);
 
 	srp_remove_host(hostdata->host);
 	scsi_remove_host(hostdata->host);
@@ -1684,6 +1687,13 @@ int __init ibmvscsi_module_init(void)
 {
 	int ret;
 
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		ibmvscsi_ops = &iseriesvscsi_ops;
+	else if (firmware_has_feature(FW_FEATURE_VIO))
+		ibmvscsi_ops = &rpavscsi_ops;
+	else
+		return -ENODEV;
+
 	ibmvscsi_transport_template =
 		srp_attach_transport(&ibmvscsi_transport_functions);
 	if (!ibmvscsi_transport_template)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index b19c2e26c2a5..46e850e302c7 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -98,21 +98,25 @@ struct ibmvscsi_host_data {
 };
 
 /* routines for managing a command/response queue */
-int ibmvscsi_init_crq_queue(struct crq_queue *queue,
-			    struct ibmvscsi_host_data *hostdata,
-			    int max_requests);
-void ibmvscsi_release_crq_queue(struct crq_queue *queue,
-				struct ibmvscsi_host_data *hostdata,
-				int max_requests);
-int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
-			     struct ibmvscsi_host_data *hostdata);
-
-int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
-				struct ibmvscsi_host_data *hostdata);
-
 void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 			 struct ibmvscsi_host_data *hostdata);
-int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
-		      u64 word1, u64 word2);
+
+struct ibmvscsi_ops {
+	int (*init_crq_queue)(struct crq_queue *queue,
+			      struct ibmvscsi_host_data *hostdata,
+			      int max_requests);
+	void (*release_crq_queue)(struct crq_queue *queue,
+				  struct ibmvscsi_host_data *hostdata,
+				  int max_requests);
+	int (*reset_crq_queue)(struct crq_queue *queue,
+			       struct ibmvscsi_host_data *hostdata);
+	int (*reenable_crq_queue)(struct crq_queue *queue,
+				  struct ibmvscsi_host_data *hostdata);
+	int (*send_crq)(struct ibmvscsi_host_data *hostdata,
+			u64 word1, u64 word2);
+};
+
+extern struct ibmvscsi_ops iseriesvscsi_ops;
+extern struct ibmvscsi_ops rpavscsi_ops;
 
 #endif /* IBMVSCSI_H */
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 6aeb5f003c3c..0775fdee5fa8 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -53,7 +53,7 @@ struct srp_lp_event {
 /**
  * standard interface for handling logical partition events.
  */
-static void ibmvscsi_handle_event(struct HvLpEvent *lpevt)
+static void iseriesvscsi_handle_event(struct HvLpEvent *lpevt)
 {
 	struct srp_lp_event *evt = (struct srp_lp_event *)lpevt;
 
@@ -74,9 +74,9 @@ static void ibmvscsi_handle_event(struct HvLpEvent *lpevt)
 /* ------------------------------------------------------------
  * Routines for driver initialization
  */
-int ibmvscsi_init_crq_queue(struct crq_queue *queue,
-			    struct ibmvscsi_host_data *hostdata,
-			    int max_requests)
+static int iseriesvscsi_init_crq_queue(struct crq_queue *queue,
+				       struct ibmvscsi_host_data *hostdata,
+				       int max_requests)
 {
 	int rc;
 
@@ -88,7 +88,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 		goto viopath_open_failed;
 	}
 
-	rc = vio_setHandler(viomajorsubtype_scsi, ibmvscsi_handle_event);
+	rc = vio_setHandler(viomajorsubtype_scsi, iseriesvscsi_handle_event);
 	if (rc < 0) {
 		printk("vio_setHandler failed with rc %d in open_event_path\n",
 		       rc);
@@ -102,9 +102,9 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 	return -1;
 }
 
-void ibmvscsi_release_crq_queue(struct crq_queue *queue,
-				struct ibmvscsi_host_data *hostdata,
-				int max_requests)
+static void iseriesvscsi_release_crq_queue(struct crq_queue *queue,
+					   struct ibmvscsi_host_data *hostdata,
+					   int max_requests)
 {
 	vio_clearHandler(viomajorsubtype_scsi);
 	viopath_close(viopath_hostLp, viomajorsubtype_scsi, max_requests);
@@ -117,8 +117,8 @@ void ibmvscsi_release_crq_queue(struct crq_queue *queue,
  *
  * no-op for iSeries
  */
-int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
-			     struct ibmvscsi_host_data *hostdata)
+static int iseriesvscsi_reset_crq_queue(struct crq_queue *queue,
+					struct ibmvscsi_host_data *hostdata)
 {
 	return 0;
 }
@@ -130,19 +130,20 @@ int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
  *
  * no-op for iSeries
  */
-int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
-				struct ibmvscsi_host_data *hostdata)
+static int iseriesvscsi_reenable_crq_queue(struct crq_queue *queue,
+					   struct ibmvscsi_host_data *hostdata)
 {
 	return 0;
 }
 
 /**
- * ibmvscsi_send_crq: - Send a CRQ
+ * iseriesvscsi_send_crq: - Send a CRQ
  * @hostdata:	the adapter
  * @word1:	the first 64 bits of the data
  * @word2:	the second 64 bits of the data
  */
-int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, u64 word1, u64 word2)
+static int iseriesvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
+				 u64 word1, u64 word2)
 {
 	single_host_data = hostdata;
 	return HvCallEvent_signalLpEventFast(viopath_hostLp,
@@ -156,3 +157,11 @@ int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, u64 word1, u64 word2)
 					     VIOVERSION << 16, word1, word2, 0,
 					     0);
 }
+
+struct ibmvscsi_ops iseriesvscsi_ops = {
+	.init_crq_queue = iseriesvscsi_init_crq_queue,
+	.release_crq_queue = iseriesvscsi_release_crq_queue,
+	.reset_crq_queue = iseriesvscsi_reset_crq_queue,
+	.reenable_crq_queue = iseriesvscsi_reenable_crq_queue,
+	.send_crq = iseriesvscsi_send_crq,
+};
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 9c14e789df5f..182146100dc1 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -42,14 +42,14 @@ static unsigned int partition_number = -1;
  * Routines for managing the command/response queue
  */
 /**
- * ibmvscsi_handle_event: - Interrupt handler for crq events
+ * rpavscsi_handle_event: - Interrupt handler for crq events
  * @irq:	number of irq to handle, not used
  * @dev_instance: ibmvscsi_host_data of host that received interrupt
  *
  * Disables interrupts and schedules srp_task
  * Always returns IRQ_HANDLED
  */
-static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
+static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
 {
 	struct ibmvscsi_host_data *hostdata =
 		(struct ibmvscsi_host_data *)dev_instance;
@@ -66,9 +66,9 @@ static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
  * the crq with the hypervisor.
  */
-void ibmvscsi_release_crq_queue(struct crq_queue *queue,
-				struct ibmvscsi_host_data *hostdata,
-				int max_requests)
+static void rpavscsi_release_crq_queue(struct crq_queue *queue,
+				       struct ibmvscsi_host_data *hostdata,
+				       int max_requests)
 {
 	long rc;
 	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
@@ -108,12 +108,13 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
 }
 
 /**
- * ibmvscsi_send_crq: - Send a CRQ
+ * rpavscsi_send_crq: - Send a CRQ
  * @hostdata:	the adapter
  * @word1:	the first 64 bits of the data
  * @word2:	the second 64 bits of the data
  */
-int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, u64 word1, u64 word2)
+static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
+			     u64 word1, u64 word2)
 {
 	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 
@@ -121,10 +122,10 @@ int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, u64 word1, u64 word2)
 }
 
 /**
- * ibmvscsi_task: - Process srps asynchronously
+ * rpavscsi_task: - Process srps asynchronously
  * @data:	ibmvscsi_host_data of host
  */
-static void ibmvscsi_task(void *data)
+static void rpavscsi_task(void *data)
 {
 	struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
 	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
@@ -190,6 +191,42 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
 }
 
 /**
+ * reset_crq_queue: - resets a crq after a failure
+ * @queue:	crq_queue to initialize and register
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ */
+static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
+				    struct ibmvscsi_host_data *hostdata)
+{
+	int rc;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+	/* Close the CRQ */
+	do {
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+	/* Clean out the queue */
+	memset(queue->msgs, 0x00, PAGE_SIZE);
+	queue->cur = 0;
+
+	set_adapter_info(hostdata);
+
+	/* And re-open it again */
+	rc = plpar_hcall_norets(H_REG_CRQ,
+				vdev->unit_address,
+				queue->msg_token, PAGE_SIZE);
+	if (rc == 2) {
+		/* Adapter is good, but other end is not ready */
+		dev_warn(hostdata->dev, "Partner adapter not ready\n");
+	} else if (rc != 0) {
+		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
+	}
+	return rc;
+}
+
+/**
  * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
  * @queue:	crq_queue to initialize and register
  * @hostdata:	ibmvscsi_host_data of host
@@ -198,9 +235,9 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
  * the crq with the hypervisor.
  * Returns zero on success.
  */
-int ibmvscsi_init_crq_queue(struct crq_queue *queue,
-			    struct ibmvscsi_host_data *hostdata,
-			    int max_requests)
+static int rpavscsi_init_crq_queue(struct crq_queue *queue,
+				   struct ibmvscsi_host_data *hostdata,
+				   int max_requests)
 {
 	int rc;
 	int retrc;
@@ -227,7 +264,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 				queue->msg_token, PAGE_SIZE);
 	if (rc == H_RESOURCE)
 		/* maybe kexecing and resource is busy. try a reset */
-		rc = ibmvscsi_reset_crq_queue(queue,
+		rc = rpavscsi_reset_crq_queue(queue,
 					      hostdata);
 
 	if (rc == 2) {
@@ -240,7 +277,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 	}
 
 	if (request_irq(vdev->irq,
-			ibmvscsi_handle_event,
+			rpavscsi_handle_event,
			0, "ibmvscsi", (void *)hostdata) != 0) {
 		dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
 			vdev->irq);
@@ -256,7 +293,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 	queue->cur = 0;
 	spin_lock_init(&queue->lock);
 
-	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
+	tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
 		     (unsigned long)hostdata);
 
 	return retrc;
@@ -281,8 +318,8 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
  * @hostdata:	ibmvscsi_host_data of host
  *
 */
-int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
-				struct ibmvscsi_host_data *hostdata)
+static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
+				       struct ibmvscsi_host_data *hostdata)
 {
 	int rc;
 	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
@@ -297,38 +334,10 @@ int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
 	return rc;
 }
 
-/**
- * reset_crq_queue: - resets a crq after a failure
- * @queue:	crq_queue to initialize and register
- * @hostdata:	ibmvscsi_host_data of host
- *
- */
-int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
-			     struct ibmvscsi_host_data *hostdata)
-{
-	int rc;
-	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
-	/* Close the CRQ */
-	do {
-		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
-	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-
-	/* Clean out the queue */
-	memset(queue->msgs, 0x00, PAGE_SIZE);
-	queue->cur = 0;
-
-	set_adapter_info(hostdata);
-
-	/* And re-open it again */
-	rc = plpar_hcall_norets(H_REG_CRQ,
-				vdev->unit_address,
-				queue->msg_token, PAGE_SIZE);
-	if (rc == 2) {
-		/* Adapter is good, but other end is not ready */
-		dev_warn(hostdata->dev, "Partner adapter not ready\n");
-	} else if (rc != 0) {
-		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
-	}
-	return rc;
-}
+struct ibmvscsi_ops rpavscsi_ops = {
+	.init_crq_queue = rpavscsi_init_crq_queue,
+	.release_crq_queue = rpavscsi_release_crq_queue,
+	.reset_crq_queue = rpavscsi_reset_crq_queue,
+	.reenable_crq_queue = rpavscsi_reenable_crq_queue,
+	.send_crq = rpavscsi_send_crq,
+};