author      Dave C Boutcher <boutcher@cs.umn.edu>            2006-06-12 22:22:51 -0400
committer   James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-06-26 10:15:41 -0400
commit      cefbda2d6cd9bf78a93768130729a6d142588d67 (patch)
tree        73c2b6e956d4959c292e550ae230534a2214d23c /drivers/scsi/ibmvscsi
parent      c65b1445d153a66ca91b00c1f10187e495c17918 (diff)
[SCSI] ibmvscsi: treat busy and error conditions separately
This patch fixes a condition where ibmvscsi treated a transport error as a
"busy" condition, so no errors were returned to the SCSI mid-layer. In a
RAID environment this meant that I/O hung rather than failing over.
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
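
Editor's note: the heart of the change is a new three-way decode of request_limit
in the send path. atomic_dec_if_positive() returns the decremented value (and only
stores it when the result is non-negative), so a limit of -1 yields -2 and a limit
of 0 yields -1. A minimal sketch of the resulting semantics, simplified from the
first two hunks below:

	int request_status = atomic_dec_if_positive(&hostdata->request_limit);

	if (request_status < -1) {
		/* request_limit was already -1: the connection is dead.
		 * Complete the command with DID_ERROR so the mid-layer
		 * (and any RAID/multipath layer above it) can fail over
		 * instead of retrying forever.
		 */
		evt_struct->cmnd->result = DID_ERROR << 16;
		evt_struct->cmnd_done(evt_struct->cmnd);
		return 0;		/* command consumed, error reported */
	} else if (request_status < 0) {
		/* request_limit was 0: genuinely out of request credits,
		 * so ask the mid-layer to requeue and retry later.
		 */
		return SCSI_MLQUEUE_HOST_BUSY;
	}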
Diffstat (limited to 'drivers/scsi/ibmvscsi')
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c   | 64
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c  |  5
2 files changed, 52 insertions(+), 17 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 944fc1203ebd..669ea4fff166 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -535,6 +535,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 				   struct ibmvscsi_host_data *hostdata)
 {
 	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
+	int request_status;
 	int rc;
 
 	/* If we have exhausted our request limit, just fail this request.
@@ -542,9 +543,18 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	 * (such as task management requests) that the mid layer may think we
 	 * can handle more requests (can_queue) when we actually can't
 	 */
-	if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
-	    (atomic_dec_if_positive(&hostdata->request_limit) < 0))
-		goto send_error;
+	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
+		request_status =
+			atomic_dec_if_positive(&hostdata->request_limit);
+		/* If request limit was -1 when we started, it is now even
+		 * less than that
+		 */
+		if (request_status < -1)
+			goto send_error;
+		/* Otherwise, if we have run out of requests */
+		else if (request_status < 0)
+			goto send_busy;
+	}
 
 	/* Copy the IU into the transfer area */
 	*evt_struct->xfer_iu = evt_struct->iu;
@@ -567,11 +577,23 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 
 	return 0;
 
-send_error:
+send_busy:
 	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
 	free_event_struct(&hostdata->pool, evt_struct);
 	return SCSI_MLQUEUE_HOST_BUSY;
+
+send_error:
+	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
+
+	if (evt_struct->cmnd != NULL) {
+		evt_struct->cmnd->result = DID_ERROR << 16;
+		evt_struct->cmnd_done(evt_struct->cmnd);
+	} else if (evt_struct->done)
+		evt_struct->done(evt_struct);
+
+	free_event_struct(&hostdata->pool, evt_struct);
+	return 0;
 }
 
 /**
@@ -1184,27 +1206,37 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 		return;
 	case 0xFF:	/* Hypervisor telling us the connection is closed */
 		scsi_block_requests(hostdata->host);
+		atomic_set(&hostdata->request_limit, 0);
 		if (crq->format == 0x06) {
 			/* We need to re-setup the interpartition connection */
 			printk(KERN_INFO
 			       "ibmvscsi: Re-enabling adapter!\n");
-			atomic_set(&hostdata->request_limit, -1);
 			purge_requests(hostdata, DID_REQUEUE);
-			if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
-							hostdata) == 0)
-				if (ibmvscsi_send_crq(hostdata,
-						      0xC001000000000000LL, 0))
+			if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
+							 hostdata) == 0) ||
+			    (ibmvscsi_send_crq(hostdata,
+					       0xC001000000000000LL, 0))) {
+				atomic_set(&hostdata->request_limit,
+					   -1);
 				printk(KERN_ERR
-				       "ibmvscsi: transmit error after"
+				       "ibmvscsi: error after"
 				       " enable\n");
+			}
 		} else {
 			printk(KERN_INFO
 			       "ibmvscsi: Virtual adapter failed rc %d!\n",
 			       crq->format);
 
-			atomic_set(&hostdata->request_limit, -1);
 			purge_requests(hostdata, DID_ERROR);
-			ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
+			if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
+						      hostdata)) ||
+			    (ibmvscsi_send_crq(hostdata,
+					       0xC001000000000000LL, 0))) {
+				atomic_set(&hostdata->request_limit,
+					   -1);
+				printk(KERN_ERR
+				       "ibmvscsi: error after reset\n");
+			}
 		}
 		scsi_unblock_requests(hostdata->host);
 		return;
@@ -1467,6 +1499,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	struct Scsi_Host *host;
 	struct device *dev = &vdev->dev;
 	unsigned long wait_switch = 0;
+	int rc;
 
 	vdev->dev.driver_data = NULL;
 
@@ -1484,8 +1517,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	atomic_set(&hostdata->request_limit, -1);
 	hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
 
-	if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
-				    max_requests) != 0) {
+	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
+	if (rc != 0 && rc != H_RESOURCE) {
 		printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
 		goto init_crq_failed;
 	}
@@ -1505,7 +1538,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	 * to fail if the other end is not acive. In that case we don't
 	 * want to scan
 	 */
-	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) {
+	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+	    || rc == H_RESOURCE) {
 		/*
 		 * Wait around max init_timeout secs for the adapter to finish
 		 * initializing. When we are done initializing, we will have a
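
Editor's note: taken together, the ibmvscsi.c hunks above also turn request_limit
into a small connection-state machine: the 0xFF handler now parks it at 0 while
recovery runs, and drops it to -1 only when re-enable/reset fails. A hypothetical
helper (illustration only, this function does not exist in the driver) summarizing
that convention:

	static const char *ibmvscsi_conn_state(struct ibmvscsi_host_data *hostdata)
	{
		int limit = atomic_read(&hostdata->request_limit);

		if (limit > 0)		/* credits available, SRP commands flow */
			return "active";
		if (limit == 0)		/* connection closed, recovery in progress:
					 * new commands get SCSI_MLQUEUE_HOST_BUSY */
			return "recovering";
		return "failed";	/* recovery failed: new commands complete
					 * with DID_ERROR */
	}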
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 1a9992bdfef8..242b8873b333 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -208,6 +208,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 			    int max_requests)
 {
 	int rc;
+	int retrc;
 	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 
 	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
@@ -226,7 +227,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 	gather_partition_info();
 	set_adapter_info(hostdata);
 
-	rc = plpar_hcall_norets(H_REG_CRQ,
+	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
 				vdev->unit_address,
 				queue->msg_token, PAGE_SIZE);
 	if (rc == H_RESOURCE)
@@ -263,7 +264,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
 		     (unsigned long)hostdata);
 
-	return 0;
+	return retrc;
 
 req_irq_failed:
 	do {
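
Editor's note: the rpa_vscsi.c change feeds the probe-path hunks above. The first
H_REG_CRQ return code is saved in retrc before any recovery attempt overwrites rc,
so H_RESOURCE (the hypervisor reporting that the CRQ page is already registered)
reaches ibmvscsi_probe(), which treats it as "the partner may already be
initialized" rather than a hard failure. Condensed from the hunks above (not a
standalone listing):

	/* In ibmvscsi_init_crq_queue(): remember the first status. */
	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
					queue->msg_token, PAGE_SIZE);
	/* ...recovery paths may reassign rc; retrc survives... */
	return retrc;			/* 0 or H_RESOURCE on success */

	/* In ibmvscsi_probe(): only a hard failure aborts... */
	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
	if (rc != 0 && rc != H_RESOURCE)
		goto init_crq_failed;
	/* ...and a failed initial handshake is tolerated when the queue
	 * pre-existed, so the host is still scanned.
	 */
	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
	    || rc == H_RESOURCE) {
		/* wait for the adapter to finish initializing, then scan */
	}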