author     Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 22:01:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 22:01:32 -0400
commit     3151367f8778a1789d6f6e6f6c642681b6cd6d64
tree       1869d5429a25abd994ae94079808b8db060ec6f3 /drivers/scsi/ibmvscsi
parent     16642a2e7be23bbda013fc32d8f6c68982eab603
parent     fe709ed827d370e6b0c0a9f9456da1c22bdcd118
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley:
"This is a large set of updates, mostly for drivers (qla2xxx [including
support for new 83xx based card], qla4xxx, mpt2sas, bfa, zfcp, hpsa,
be2iscsi, isci, lpfc, ipr, ibmvfc, ibmvscsi, megaraid_sas).
There's also a rework for tape adding virtually unlimited numbers of
tape drives plus a set of dif fixes for sd and a fix for a live lock
on hot remove of SCSI devices.
This round includes a signed tag pull of isci-for-3.6
Signed-off-by: James Bottomley <JBottomley@Parallels.com>"
Fix up trivial conflict in drivers/scsi/qla2xxx/qla_nx.c due to new PCI
helper function use in a function that was removed by this pull.
* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (198 commits)
[SCSI] st: remove st_mutex
[SCSI] sd: Ensure we correctly disable devices with unknown protection type
[SCSI] hpsa: gen8plus Smart Array IDs
[SCSI] qla4xxx: Update driver version to 5.03.00-k1
[SCSI] qla4xxx: Disable generating pause frames for ISP83XX
[SCSI] qla4xxx: Fix double clearing of risc_intr for ISP83XX
[SCSI] qla4xxx: IDC implementation for Loopback
[SCSI] qla4xxx: update copyrights in LICENSE.qla4xxx
[SCSI] qla4xxx: Fix panic while rmmod
[SCSI] qla4xxx: Fail probe_adapter if IRQ allocation fails
[SCSI] qla4xxx: Prevent MSI/MSI-X falling back to INTx for ISP82XX
[SCSI] qla4xxx: Update idc reg in case of PCI AER
[SCSI] qla4xxx: Fix double IDC locking in qla4_8xxx_error_recovery
[SCSI] qla4xxx: Clear interrupt while unloading driver for ISP83XX
[SCSI] qla4xxx: Print correct IDC version
[SCSI] qla4xxx: Added new mbox cmd to pass driver version to FW
[SCSI] scsi_dh_alua: Enable STPG for unavailable ports
[SCSI] scsi_remove_target: fix softlockup regression on hot remove
[SCSI] ibmvscsi: Fix host config length field overflow
[SCSI] ibmvscsi: Remove backend abstraction
...
Diffstat (limited to 'drivers/scsi/ibmvscsi')
-rw-r--r--  drivers/scsi/ibmvscsi/Makefile    |   6
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c    |  36
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h    |   4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c  | 352
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h  |  22
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 368
6 files changed, 369 insertions, 419 deletions
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index ff5b5c5538ee..cb150d1e5850 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -1,7 +1,3 @@
-obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsic.o
-
-ibmvscsic-y			+= ibmvscsi.o
-ibmvscsic-$(CONFIG_PPC_PSERIES)	+= rpa_vscsi.o
-
+obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi.o
 obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvstgt.o
 obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 134a0ae85bb7..5e8d51bd03de 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2242,6 +2242,21 @@ static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
 }
 
 /**
+ * ibmvfc_match_evt - Match function for specified event
+ * @evt:	ibmvfc event struct
+ * @match:	event to match
+ *
+ * Returns:
+ *	1 if event matches key / 0 if event does not match key
+ **/
+static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
+{
+	if (evt == match)
+		return 1;
+	return 0;
+}
+
+/**
  * ibmvfc_abort_task_set - Abort outstanding commands to the device
  * @sdev:	scsi device to abort commands
  *
@@ -2322,7 +2337,20 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
 		if (rc) {
 			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
 			ibmvfc_reset_host(vhost);
-			rsp_rc = 0;
+			rsp_rc = -EIO;
+			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
+
+			if (rc == SUCCESS)
+				rsp_rc = 0;
+
+			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
+			if (rc != SUCCESS) {
+				spin_lock_irqsave(vhost->host->host_lock, flags);
+				ibmvfc_hard_reset_host(vhost);
+				spin_unlock_irqrestore(vhost->host->host_lock, flags);
+				rsp_rc = 0;
+			}
+
 			goto out;
 		}
 	}
@@ -2597,8 +2625,10 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
 	case IBMVFC_AE_SCN_FABRIC:
 	case IBMVFC_AE_SCN_DOMAIN:
 		vhost->events_to_log |= IBMVFC_AE_RSCN;
-		vhost->delay_init = 1;
-		__ibmvfc_reset_host(vhost);
+		if (vhost->state < IBMVFC_HALTED) {
+			vhost->delay_init = 1;
+			__ibmvfc_reset_host(vhost);
+		}
 		break;
 	case IBMVFC_AE_SCN_NPORT:
 	case IBMVFC_AE_SCN_GROUP:
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 834c37fc7ce9..3be8af624e6f 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME	"ibmvfc"
-#define IBMVFC_DRIVER_VERSION		"1.0.9"
-#define IBMVFC_DRIVER_DATE		"(August 5, 2010)"
+#define IBMVFC_DRIVER_VERSION		"1.0.10"
+#define IBMVFC_DRIVER_DATE		"(August 24, 2012)"
 
 #define IBMVFC_DEFAULT_TIMEOUT	60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT	45
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 3a6c4742951e..ef9a54c7da67 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,13 +93,13 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 static int fast_fail = 1;
 static int client_reserve = 1;
+static char partition_name[97] = "UNKNOWN";
+static unsigned int partition_number = -1;
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
 #define IBMVSCSI_VERSION "1.5.9"
 
-static struct ibmvscsi_ops *ibmvscsi_ops;
-
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
 MODULE_LICENSE("GPL");
@@ -118,6 +118,316 @@ MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
 module_param_named(client_reserve, client_reserve, int, S_IRUGO );
 MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
 
+static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+				struct ibmvscsi_host_data *hostdata);
+
+/* ------------------------------------------------------------
+ * Routines for managing the command/response queue
+ */
+/**
+ * ibmvscsi_handle_event: - Interrupt handler for crq events
+ * @irq:	number of irq to handle, not used
+ * @dev_instance: ibmvscsi_host_data of host that received interrupt
+ *
+ * Disables interrupts and schedules srp_task
+ * Always returns IRQ_HANDLED
+ */
+static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
+{
+	struct ibmvscsi_host_data *hostdata =
+			(struct ibmvscsi_host_data *)dev_instance;
+	vio_disable_interrupts(to_vio_dev(hostdata->dev));
+	tasklet_schedule(&hostdata->srp_task);
+	return IRQ_HANDLED;
+}
+
+/**
+ * release_crq_queue: - Deallocates data and unregisters CRQ
+ * @queue:	crq_queue to initialize and register
+ * @host_data:	ibmvscsi_host_data of host
+ *
+ * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
+ * the crq with the hypervisor.
+ */
+static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
+				       struct ibmvscsi_host_data *hostdata,
+				       int max_requests)
+{
+	long rc = 0;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+	free_irq(vdev->irq, (void *)hostdata);
+	tasklet_kill(&hostdata->srp_task);
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+	dma_unmap_single(hostdata->dev,
+			 queue->msg_token,
+			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+	free_page((unsigned long)queue->msgs);
+}
+
+/**
+ * crq_queue_next_crq: - Returns the next entry in message queue
+ * @queue:	crq_queue to use
+ *
+ * Returns pointer to next entry in queue, or NULL if there are no new
+ * entried in the CRQ.
+ */
+static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
+{
+	struct viosrp_crq *crq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	crq = &queue->msgs[queue->cur];
+	if (crq->valid & 0x80) {
+		if (++queue->cur == queue->size)
+			queue->cur = 0;
+	} else
+		crq = NULL;
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	return crq;
+}
+
+/**
+ * ibmvscsi_send_crq: - Send a CRQ
+ * @hostdata:	the adapter
+ * @word1:	the first 64 bits of the data
+ * @word2:	the second 64 bits of the data
+ */
+static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
+			     u64 word1, u64 word2)
+{
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+}
+
+/**
+ * ibmvscsi_task: - Process srps asynchronously
+ * @data:	ibmvscsi_host_data of host
+ */
+static void ibmvscsi_task(void *data)
+{
+	struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+	struct viosrp_crq *crq;
+	int done = 0;
+
+	while (!done) {
+		/* Pull all the valid messages off the CRQ */
+		while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
+			ibmvscsi_handle_crq(crq, hostdata);
+			crq->valid = 0x00;
+		}
+
+		vio_enable_interrupts(vdev);
+		crq = crq_queue_next_crq(&hostdata->queue);
+		if (crq != NULL) {
+			vio_disable_interrupts(vdev);
+			ibmvscsi_handle_crq(crq, hostdata);
+			crq->valid = 0x00;
+		} else {
+			done = 1;
+		}
+	}
+}
+
+static void gather_partition_info(void)
+{
+	struct device_node *rootdn;
+
+	const char *ppartition_name;
+	const unsigned int *p_number_ptr;
+
+	/* Retrieve information about this partition */
+	rootdn = of_find_node_by_path("/");
+	if (!rootdn) {
+		return;
+	}
+
+	ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
+	if (ppartition_name)
+		strncpy(partition_name, ppartition_name,
+				sizeof(partition_name));
+	p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
+	if (p_number_ptr)
+		partition_number = *p_number_ptr;
+	of_node_put(rootdn);
+}
+
+static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+	memset(&hostdata->madapter_info, 0x00,
+			sizeof(hostdata->madapter_info));
+
+	dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
+	strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
+
+	strncpy(hostdata->madapter_info.partition_name, partition_name,
+			sizeof(hostdata->madapter_info.partition_name));
+
+	hostdata->madapter_info.partition_number = partition_number;
+
+	hostdata->madapter_info.mad_version = 1;
+	hostdata->madapter_info.os_type = 2;
+}
+
+/**
+ * reset_crq_queue: - resets a crq after a failure
+ * @queue:	crq_queue to initialize and register
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ */
+static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
+				    struct ibmvscsi_host_data *hostdata)
+{
+	int rc = 0;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+	/* Close the CRQ */
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+	/* Clean out the queue */
+	memset(queue->msgs, 0x00, PAGE_SIZE);
+	queue->cur = 0;
+
+	set_adapter_info(hostdata);
+
+	/* And re-open it again */
+	rc = plpar_hcall_norets(H_REG_CRQ,
+				vdev->unit_address,
+				queue->msg_token, PAGE_SIZE);
+	if (rc == 2) {
+		/* Adapter is good, but other end is not ready */
+		dev_warn(hostdata->dev, "Partner adapter not ready\n");
+	} else if (rc != 0) {
+		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
+	}
+	return rc;
+}
+
+/**
+ * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
+ * @queue:	crq_queue to initialize and register
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ * Allocates a page for messages, maps it for dma, and registers
+ * the crq with the hypervisor.
+ * Returns zero on success.
+ */
+static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
+				   struct ibmvscsi_host_data *hostdata,
+				   int max_requests)
+{
+	int rc;
+	int retrc;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
+
+	if (!queue->msgs)
+		goto malloc_failed;
+	queue->size = PAGE_SIZE / sizeof(*queue->msgs);
+
+	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
+					  queue->size * sizeof(*queue->msgs),
+					  DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(hostdata->dev, queue->msg_token))
+		goto map_failed;
+
+	gather_partition_info();
+	set_adapter_info(hostdata);
+
+	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
+				vdev->unit_address,
+				queue->msg_token, PAGE_SIZE);
+	if (rc == H_RESOURCE)
+		/* maybe kexecing and resource is busy. try a reset */
+		rc = ibmvscsi_reset_crq_queue(queue,
+					      hostdata);
+
+	if (rc == 2) {
+		/* Adapter is good, but other end is not ready */
+		dev_warn(hostdata->dev, "Partner adapter not ready\n");
+		retrc = 0;
+	} else if (rc != 0) {
+		dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
+		goto reg_crq_failed;
+	}
+
+	queue->cur = 0;
+	spin_lock_init(&queue->lock);
+
+	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
+		     (unsigned long)hostdata);
+
+	if (request_irq(vdev->irq,
+			ibmvscsi_handle_event,
+			0, "ibmvscsi", (void *)hostdata) != 0) {
+		dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
+			vdev->irq);
+		goto req_irq_failed;
+	}
+
+	rc = vio_enable_interrupts(vdev);
+	if (rc != 0) {
+		dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
+		goto req_irq_failed;
+	}
+
+	return retrc;
+
+      req_irq_failed:
+	tasklet_kill(&hostdata->srp_task);
+	rc = 0;
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+      reg_crq_failed:
+	dma_unmap_single(hostdata->dev,
+			 queue->msg_token,
+			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+      map_failed:
+	free_page((unsigned long)queue->msgs);
+      malloc_failed:
+	return -1;
+}
+
+/**
+ * reenable_crq_queue: - reenables a crq after
+ * @queue:	crq_queue to initialize and register
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ */
+static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+				       struct ibmvscsi_host_data *hostdata)
+{
+	int rc = 0;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+	/* Re-enable the CRQ */
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+	} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+	if (rc)
+		dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
+	return rc;
+}
+
 /* ------------------------------------------------------------
  * Routines for the event pool and event structs
  */
@@ -611,7 +921,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	}
 
 	if ((rc =
-	     ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
 		list_del(&evt_struct->list);
 		del_timer(&evt_struct->timer);
 
@@ -1420,8 +1730,8 @@ static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
  * @hostdata:	ibmvscsi_host_data of host
  *
  */
-void ibmvscsi_handle_crq(struct viosrp_crq *crq,
-			 struct ibmvscsi_host_data *hostdata)
+static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+				struct ibmvscsi_host_data *hostdata)
 {
 	long rc;
 	unsigned long flags;
@@ -1433,8 +1743,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	case 0x01:	/* Initialization message */
 		dev_info(hostdata->dev, "partner initialized\n");
 		/* Send back a response */
-		if ((rc = ibmvscsi_ops->send_crq(hostdata,
-						 0xC002000000000000LL, 0)) == 0) {
+		rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
+		if (rc == 0) {
 			/* Now login */
 			init_adapter(hostdata);
 		} else {
@@ -1541,6 +1851,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 
 	host_config = &evt_struct->iu.mad.host_config;
 
+	/* The transport length field is only 16-bit */
+	length = min(0xffff, length);
+
 	/* Set up a lun reset SRP command */
 	memset(host_config, 0x00, sizeof(*host_config));
 	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
@@ -1840,17 +2153,17 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
 		smp_rmb();
 		hostdata->reset_crq = 0;
 
-		rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata);
+		rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
 		if (!rc)
-			rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
+			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
 		vio_enable_interrupts(to_vio_dev(hostdata->dev));
 	} else if (hostdata->reenable_crq) {
 		smp_rmb();
 		action = "enable";
-		rc = ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata);
+		rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
 		hostdata->reenable_crq = 0;
 		if (!rc)
-			rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
+			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
 	} else
 		return;
 
@@ -1944,7 +2257,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 		goto init_crq_failed;
 	}
 
-	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
+	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
 	if (rc != 0 && rc != H_RESOURCE) {
 		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
 		goto kill_kthread;
@@ -1974,7 +2287,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	 * to fail if the other end is not acive.  In that case we don't
 	 * want to scan
 	 */
-	if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
 	    || rc == H_RESOURCE) {
 		/*
 		 * Wait around max init_timeout secs for the adapter to finish
@@ -2002,7 +2315,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
       add_host_failed:
 	release_event_pool(&hostdata->pool, hostdata);
       init_pool_failed:
-	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
+	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
       kill_kthread:
 	kthread_stop(hostdata->work_thread);
       init_crq_failed:
@@ -2018,7 +2331,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
 	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
 	unmap_persist_bufs(hostdata);
 	release_event_pool(&hostdata->pool, hostdata);
-	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
+	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
 					max_events);
 
 	kthread_stop(hostdata->work_thread);
@@ -2039,7 +2352,10 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
 static int ibmvscsi_resume(struct device *dev)
 {
 	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
-	return ibmvscsi_ops->resume(hostdata);
+	vio_disable_interrupts(to_vio_dev(hostdata->dev));
+	tasklet_schedule(&hostdata->srp_task);
+
+	return 0;
 }
 
 /**
@@ -2076,9 +2392,7 @@ int __init ibmvscsi_module_init(void)
 	driver_template.can_queue = max_requests;
 	max_events = max_requests + 2;
 
-	if (firmware_has_feature(FW_FEATURE_VIO))
-		ibmvscsi_ops = &rpavscsi_ops;
-	else
+	if (!firmware_has_feature(FW_FEATURE_VIO))
 		return -ENODEV;
 
 	ibmvscsi_transport_template =
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index c503e1776014..7d64867c5dd1 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -107,26 +107,4 @@ struct ibmvscsi_host_data {
 	dma_addr_t adapter_info_addr;
 };
 
-/* routines for managing a command/response queue */
-void ibmvscsi_handle_crq(struct viosrp_crq *crq,
-			 struct ibmvscsi_host_data *hostdata);
-
-struct ibmvscsi_ops {
-	int (*init_crq_queue)(struct crq_queue *queue,
-			      struct ibmvscsi_host_data *hostdata,
-			      int max_requests);
-	void (*release_crq_queue)(struct crq_queue *queue,
-				  struct ibmvscsi_host_data *hostdata,
-				  int max_requests);
-	int (*reset_crq_queue)(struct crq_queue *queue,
-			       struct ibmvscsi_host_data *hostdata);
-	int (*reenable_crq_queue)(struct crq_queue *queue,
-				  struct ibmvscsi_host_data *hostdata);
-	int (*send_crq)(struct ibmvscsi_host_data *hostdata,
-			u64 word1, u64 word2);
-	int (*resume) (struct ibmvscsi_host_data *hostdata);
-};
-
-extern struct ibmvscsi_ops rpavscsi_ops;
-
 #endif /* IBMVSCSI_H */
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
deleted file mode 100644
index f48ae0190d95..000000000000
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ /dev/null
@@ -1,368 +0,0 @@
-/* ------------------------------------------------------------
- * rpa_vscsi.c
- * (C) Copyright IBM Corporation 1994, 2003
- * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
- *          Santiago Leon (santil@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
- * USA
- *
- * ------------------------------------------------------------
- * RPA-specific functions of the SCSI host adapter for Virtual I/O devices
- *
- * This driver allows the Linux SCSI peripheral drivers to directly
- * access devices in the hosting partition, either on an iSeries
- * hypervisor system or a converged hypervisor system.
- */
-
-#include <asm/vio.h>
-#include <asm/prom.h>
-#include <asm/iommu.h>
-#include <asm/hvcall.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/gfp.h>
-#include <linux/interrupt.h>
-#include "ibmvscsi.h"
-
-static char partition_name[97] = "UNKNOWN";
-static unsigned int partition_number = -1;
-
-/* ------------------------------------------------------------
- * Routines for managing the command/response queue
- */
-/**
- * rpavscsi_handle_event: - Interrupt handler for crq events
- * @irq:	number of irq to handle, not used
- * @dev_instance: ibmvscsi_host_data of host that received interrupt
- *
- * Disables interrupts and schedules srp_task
- * Always returns IRQ_HANDLED
- */
-static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
-{
-	struct ibmvscsi_host_data *hostdata =
-	    (struct ibmvscsi_host_data *)dev_instance;
-	vio_disable_interrupts(to_vio_dev(hostdata->dev));
-	tasklet_schedule(&hostdata->srp_task);
-	return IRQ_HANDLED;
-}
-
-/**
- * release_crq_queue: - Deallocates data and unregisters CRQ
- * @queue:	crq_queue to initialize and register
- * @host_data:	ibmvscsi_host_data of host
- *
- * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
- * the crq with the hypervisor.
- */
-static void rpavscsi_release_crq_queue(struct crq_queue *queue,
-				       struct ibmvscsi_host_data *hostdata,
-				       int max_requests)
-{
-	long rc = 0;
-	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-	free_irq(vdev->irq, (void *)hostdata);
-	tasklet_kill(&hostdata->srp_task);
-	do {
-		if (rc)
-			msleep(100);
-		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
-	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-	dma_unmap_single(hostdata->dev,
-			 queue->msg_token,
-			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
-	free_page((unsigned long)queue->msgs);
-}
-
-/**
- * crq_queue_next_crq: - Returns the next entry in message queue
- * @queue:	crq_queue to use
- *
- * Returns pointer to next entry in queue, or NULL if there are no new
- * entried in the CRQ.
- */
-static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
-{
-	struct viosrp_crq *crq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&queue->lock, flags);
-	crq = &queue->msgs[queue->cur];
-	if (crq->valid & 0x80) {
-		if (++queue->cur == queue->size)
-			queue->cur = 0;
-	} else
-		crq = NULL;
-	spin_unlock_irqrestore(&queue->lock, flags);
-
-	return crq;
-}
-
-/**
- * rpavscsi_send_crq: - Send a CRQ
- * @hostdata:	the adapter
- * @word1:	the first 64 bits of the data
- * @word2:	the second 64 bits of the data
- */
-static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
-			     u64 word1, u64 word2)
-{
-	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
-	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
-}
-
-/**
- * rpavscsi_task: - Process srps asynchronously
- * @data:	ibmvscsi_host_data of host
- */
-static void rpavscsi_task(void *data)
-{
-	struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
-	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-	struct viosrp_crq *crq;
-	int done = 0;
-
-	while (!done) {
-		/* Pull all the valid messages off the CRQ */
-		while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
-			ibmvscsi_handle_crq(crq, hostdata);
-			crq->valid = 0x00;
-		}
-
-		vio_enable_interrupts(vdev);
-		if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
-			vio_disable_interrupts(vdev);
-			ibmvscsi_handle_crq(crq, hostdata);
-			crq->valid = 0x00;
-		} else {
-			done = 1;
-		}
-	}
-}
-
-static void gather_partition_info(void)
-{
-	struct device_node *rootdn;
-
-	const char *ppartition_name;
-	const unsigned int *p_number_ptr;
-
-	/* Retrieve information about this partition */
-	rootdn = of_find_node_by_path("/");
-	if (!rootdn) {
-		return;
-	}
-
-	ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
-	if (ppartition_name)
-		strncpy(partition_name, ppartition_name,
-				sizeof(partition_name));
-	p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
-	if (p_number_ptr)
-		partition_number = *p_number_ptr;
-	of_node_put(rootdn);
-}
-
-static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
-{
-	memset(&hostdata->madapter_info, 0x00,
-			sizeof(hostdata->madapter_info));
-
-	dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
-	strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
-
-	strncpy(hostdata->madapter_info.partition_name, partition_name,
-			sizeof(hostdata->madapter_info.partition_name));
-
-	hostdata->madapter_info.partition_number = partition_number;
-
-	hostdata->madapter_info.mad_version = 1;
-	hostdata->madapter_info.os_type = 2;
-}
-
-/**
- * reset_crq_queue: - resets a crq after a failure
- * @queue:	crq_queue to initialize and register
- * @hostdata:	ibmvscsi_host_data of host
- *
- */
-static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
-				    struct ibmvscsi_host_data *hostdata)
-{
-	int rc = 0;
-	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
-	/* Close the CRQ */
-	do {
-		if (rc)
-			msleep(100);
-		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
-	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-
-	/* Clean out the queue */
-	memset(queue->msgs, 0x00, PAGE_SIZE);
-	queue->cur = 0;
-
-	set_adapter_info(hostdata);
-
-	/* And re-open it again */
-	rc = plpar_hcall_norets(H_REG_CRQ,
-				vdev->unit_address,
-				queue->msg_token, PAGE_SIZE);
-	if (rc == 2) {
-		/* Adapter is good, but other end is not ready */
-		dev_warn(hostdata->dev, "Partner adapter not ready\n");
-	} else if (rc != 0) {
-		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
-	}
-	return rc;
-}
-
-/**
- * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
- * @queue:	crq_queue to initialize and register
- * @hostdata:	ibmvscsi_host_data of host
- *
- * Allocates a page for messages, maps it for dma, and registers
- * the crq with the hypervisor.
- * Returns zero on success.
- */
-static int rpavscsi_init_crq_queue(struct crq_queue *queue,
-				   struct ibmvscsi_host_data *hostdata,
-				   int max_requests)
-{
-	int rc;
-	int retrc;
-	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
-	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
-
-	if (!queue->msgs)
-		goto malloc_failed;
-	queue->size = PAGE_SIZE / sizeof(*queue->msgs);
-
-	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
-					  queue->size * sizeof(*queue->msgs),
-					  DMA_BIDIRECTIONAL);
-
-	if (dma_mapping_error(hostdata->dev, queue->msg_token))
-		goto map_failed;
-
-	gather_partition_info();
-	set_adapter_info(hostdata);
-
-	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
-				vdev->unit_address,
-				queue->msg_token, PAGE_SIZE);
-	if (rc == H_RESOURCE)
-		/* maybe kexecing and resource is busy. try a reset */
-		rc = rpavscsi_reset_crq_queue(queue,
-					      hostdata);
-
-	if (rc == 2) {
-		/* Adapter is good, but other end is not ready */
-		dev_warn(hostdata->dev, "Partner adapter not ready\n");
-		retrc = 0;
-	} else if (rc != 0) {
-		dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
-		goto reg_crq_failed;
-	}
-
-	queue->cur = 0;
-	spin_lock_init(&queue->lock);
-
-	tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
-		     (unsigned long)hostdata);
-
-	if (request_irq(vdev->irq,
-			rpavscsi_handle_event,
-			0, "ibmvscsi", (void *)hostdata) != 0) {
-		dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
-			vdev->irq);
-		goto req_irq_failed;
-	}
-
-	rc = vio_enable_interrupts(vdev);
-	if (rc != 0) {
-		dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
-		goto req_irq_failed;
-	}
-
-	return retrc;
-
-      req_irq_failed:
-	tasklet_kill(&hostdata->srp_task);
-	rc = 0;
-	do {
-		if (rc)
-			msleep(100);
-		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
-	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-      reg_crq_failed:
-	dma_unmap_single(hostdata->dev,
-			 queue->msg_token,
-			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
-      map_failed:
-	free_page((unsigned long)queue->msgs);
-      malloc_failed:
-	return -1;
-}
-
-/**
- * reenable_crq_queue: - reenables a crq after
- * @queue:	crq_queue to initialize and register
- * @hostdata:	ibmvscsi_host_data of host
- *
- */
-static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
-				       struct ibmvscsi_host_data *hostdata)
-{
-	int rc = 0;
-	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
-	/* Re-enable the CRQ */
-	do {
-		if (rc)
-			msleep(100);
-		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
-	} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-
-	if (rc)
-		dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
-	return rc;
-}
-
-/**
- * rpavscsi_resume: - resume after suspend
- * @hostdata:	ibmvscsi_host_data of host
- *
- */
-static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
-{
-	vio_disable_interrupts(to_vio_dev(hostdata->dev));
-	tasklet_schedule(&hostdata->srp_task);
-	return 0;
-}
-
-struct ibmvscsi_ops rpavscsi_ops = {
-	.init_crq_queue = rpavscsi_init_crq_queue,
-	.release_crq_queue = rpavscsi_release_crq_queue,
-	.reset_crq_queue = rpavscsi_reset_crq_queue,
-	.reenable_crq_queue = rpavscsi_reenable_crq_queue,
-	.send_crq = rpavscsi_send_crq,
-	.resume = rpavscsi_resume,
-};