author     Jeff Skirvin <jeffrey.d.skirvin@intel.com>    2011-10-27 18:05:06 -0400
committer  James Bottomley <JBottomley@Parallels.com>    2011-10-31 05:16:23 -0400
commit     b343dff1a269bcc0eac123ef541c5476b03d52c1
tree       d272a97f43f44d19908bb7c5ca997a82be2e0028    /drivers/scsi/isci/task.c
parent     d6891682220c18c01bf6838f30e37342c38fde44
[SCSI] isci: Handle task request timeouts correctly.
In the case where "task" requests time out (note that this class of
requests can also include SATA/STP soft reset FIS transmissions),
handle the case where the task was already being managed by a call to
terminate the task request: complete both the TMF and the aborting
process.
Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/isci/task.c')
-rw-r--r--   drivers/scsi/isci/task.c | 149
 1 file changed, 107 insertions(+), 42 deletions(-)
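For orientation before the diff: the sketch below is not part of the patch. It is a minimal, self-contained userspace model (assuming POSIX threads) of the handshake this change establishes in isci_task_execute_tmf(): the TMF now carries a default timeout status, a timed-out TMF is terminated and then waited on for a bounded termination interval, and if that second wait also expires the request is marked as a zombie and its waiters are woken instead of being left to hang. The completion type, the STATUS_* values, and main() are illustrative scaffolding only; ISCI_TERMINATION_TIMEOUT_MSEC and the "zombie" naming come from the patch.

```c
/* Build: cc -pthread tmf_timeout_model.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* A toy stand-in for the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            done;
};

static void completion_init(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Returns true if the completion fired before timeout_ms elapsed. */
static bool wait_for_completion_timeout(struct completion *c, long timeout_ms)
{
	struct timespec ts;
	bool done;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec  += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&c->lock);
	while (!c->done) {
		if (pthread_cond_timedwait(&c->cond, &c->lock, &ts) != 0)
			break;	/* timed out */
	}
	done = c->done;
	pthread_mutex_unlock(&c->lock);
	return done;
}

enum { STATUS_TIMEOUT = -1, STATUS_OK = 0 };

/* Toy request: a waiter, a status defaulting to "timeout", a zombie flag. */
struct tmf_request {
	struct completion *complete;
	int  status;
	bool dead;
};

int main(void)
{
	struct completion done;
	struct tmf_request req = { .complete = &done, .status = STATUS_TIMEOUT };

	completion_init(&done);

	/* A hardware completion would call complete(req.complete) and set
	 * req.status = STATUS_OK; nothing does here, modelling an unplug. */

	if (!wait_for_completion_timeout(&done, 100)) {
		/* TMF timed out: "terminate" it, then wait a bounded time for
		 * the termination itself (ISCI_TERMINATION_TIMEOUT_MSEC in
		 * the patch) rather than waiting forever. */
		if (!wait_for_completion_timeout(&done, 50) &&
		    req.status == STATUS_TIMEOUT) {
			/* Termination also timed out and the status never
			 * changed: mark the request as a zombie, detach the
			 * waiter, and wake it so no thread hangs. */
			req.dead = true;
			req.complete = NULL;
			complete(&done);
		}
	}

	printf("request %s, status %d\n",
	       req.dead ? "zombified" : "completed", req.status);
	return 0;
}
```

The two-stage bounded wait mirrors the patch's replacement of the unbounded wait_for_completion() with wait_for_completion_timeout(), which is what previously allowed the path to hang on an unplugged device.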
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 7180b048c34b..4175f173868e 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -338,6 +338,61 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
 	return ireq;
 }
 
+/**
+ * isci_request_mark_zombie() - This function must be called with scic_lock held.
+ */
+static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
+{
+	struct completion *tmf_completion = NULL;
+	struct completion *req_completion;
+
+	/* Set the request state to "dead". */
+	ireq->status = dead;
+
+	req_completion = ireq->io_request_completion;
+	ireq->io_request_completion = NULL;
+
+	if (ireq->ttype == io_task) {
+
+		/* Break links with the sas_task - the callback is done
+		 * elsewhere.
+		 */
+		struct sas_task *task = isci_request_access_task(ireq);
+
+		if (task)
+			task->lldd_task = NULL;
+
+		ireq->ttype_ptr.io_task_ptr = NULL;
+	} else {
+		/* Break links with the TMF request. */
+		struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+		/* In the case where a task request is dying,
+		 * the thread waiting on the complete will sit and
+		 * timeout unless we wake it now.  Since the TMF
+		 * has a default error status, complete it here
+		 * to wake the waiting thread.
+		 */
+		if (tmf) {
+			tmf_completion = tmf->complete;
+			tmf->complete = NULL;
+		}
+		ireq->ttype_ptr.tmf_task_ptr = NULL;
+		dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
+			__func__, tmf->tmf_code, tmf->io_tag);
+	}
+
+	dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
+		 ireq->io_tag);
+
+	/* Don't force waiting threads to timeout. */
+	if (req_completion)
+		complete(req_completion);
+
+	if (tmf_completion != NULL)
+		complete(tmf_completion);
+}
+
 static int isci_task_execute_tmf(struct isci_host *ihost,
 				 struct isci_remote_device *idev,
 				 struct isci_tmf *tmf, unsigned long timeout_ms)
@@ -375,6 +430,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 
 	/* Assign the pointer to the TMF's completion kernel wait structure. */
 	tmf->complete = &completion;
+	tmf->status = SCI_FAILURE_TIMEOUT;
 
 	ireq = isci_task_request_build(ihost, idev, tag, tmf);
 	if (!ireq)
@@ -410,18 +466,35 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 					       msecs_to_jiffies(timeout_ms));
 
 	if (timeleft == 0) {
+		/* The TMF did not complete - this could be because
+		 * of an unplug.  Terminate the TMF request now.
+		 */
 		spin_lock_irqsave(&ihost->scic_lock, flags);
 
 		if (tmf->cb_state_func != NULL)
-			tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
+			tmf->cb_state_func(isci_tmf_timed_out, tmf,
+					   tmf->cb_data);
 
-		sci_controller_terminate_request(ihost,
-						 idev,
-						 ireq);
+		sci_controller_terminate_request(ihost, idev, ireq);
 
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-		wait_for_completion(tmf->complete);
+		timeleft = wait_for_completion_timeout(
+			&completion,
+			msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
+
+		if (!timeleft) {
+			/* Strange condition - the termination of the TMF
+			 * request timed-out.
+			 */
+			spin_lock_irqsave(&ihost->scic_lock, flags);
+
+			/* If the TMF status has not changed, kill it. */
+			if (tmf->status == SCI_FAILURE_TIMEOUT)
+				isci_request_mark_zombie(ihost, ireq);
+
+			spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		}
 	}
 
 	isci_print_tmf(tmf);
@@ -645,42 +718,27 @@ static void isci_terminate_request_core(struct isci_host *ihost,
 			__func__, isci_request, io_request_completion);
 
 		/* Wait here for the request to complete. */
-		#define TERMINATION_TIMEOUT_MSEC 500
 		termination_completed
 			= wait_for_completion_timeout(
 			   io_request_completion,
-			   msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
+			   msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
 
 		if (!termination_completed) {
 
 			/* The request to terminate has timed out. */
-			spin_lock_irqsave(&ihost->scic_lock,
-					  flags);
+			spin_lock_irqsave(&ihost->scic_lock, flags);
 
 			/* Check for state changes. */
-			if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+			if (!test_bit(IREQ_TERMINATED,
+				      &isci_request->flags)) {
 
 				/* The best we can do is to have the
 				 * request die a silent death if it
 				 * ever really completes.
-				 *
-				 * Set the request state to "dead",
-				 * and clear the task pointer so that
-				 * an actual completion event callback
-				 * doesn't do anything.
 				 */
-				isci_request->status = dead;
-				isci_request->io_request_completion
-					= NULL;
-
-				if (isci_request->ttype == io_task) {
-
-					/* Break links with the
-					 * sas_task.
-					 */
-					isci_request->ttype_ptr.io_task_ptr
-						= NULL;
-				}
+				isci_request_mark_zombie(ihost,
+							 isci_request);
+				needs_cleanup_handling = true;
 			} else
 				termination_completed = 1;
 
@@ -1302,7 +1360,8 @@ isci_task_request_complete(struct isci_host *ihost,
 			   enum sci_task_status completion_status)
 {
 	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
-	struct completion *tmf_complete;
+	struct completion *tmf_complete = NULL;
+	struct completion *request_complete = ireq->io_request_completion;
 
 	dev_dbg(&ihost->pdev->dev,
 		"%s: request = %p, status=%d\n",
@@ -1310,22 +1369,23 @@ isci_task_request_complete(struct isci_host *ihost,
 
 	isci_request_change_state(ireq, completed);
 
-	tmf->status = completion_status;
 	set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
 
-	if (tmf->proto == SAS_PROTOCOL_SSP) {
-		memcpy(&tmf->resp.resp_iu,
-		       &ireq->ssp.rsp,
-		       SSP_RESP_IU_MAX_SIZE);
-	} else if (tmf->proto == SAS_PROTOCOL_SATA) {
-		memcpy(&tmf->resp.d2h_fis,
-		       &ireq->stp.rsp,
-		       sizeof(struct dev_to_host_fis));
+	if (tmf) {
+		tmf->status = completion_status;
+
+		if (tmf->proto == SAS_PROTOCOL_SSP) {
+			memcpy(&tmf->resp.resp_iu,
+			       &ireq->ssp.rsp,
+			       SSP_RESP_IU_MAX_SIZE);
+		} else if (tmf->proto == SAS_PROTOCOL_SATA) {
+			memcpy(&tmf->resp.d2h_fis,
+			       &ireq->stp.rsp,
+			       sizeof(struct dev_to_host_fis));
+		}
+		/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
+		tmf_complete = tmf->complete;
 	}
-
-	/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
-	tmf_complete = tmf->complete;
-
 	sci_controller_complete_io(ihost, ireq->target_device, ireq);
 	/* set the 'terminated' flag handle to make sure it cannot be terminated
 	 * or completed again.
@@ -1343,8 +1403,13 @@ isci_task_request_complete(struct isci_host *ihost,
 		list_del_init(&ireq->dev_node);
 	}
 
+	/* "request_complete" is set if the task was being terminated. */
+	if (request_complete)
+		complete(request_complete);
+
 	/* The task management part completes last. */
-	complete(tmf_complete);
+	if (tmf_complete)
+		complete(tmf_complete);
 }
 
 static void isci_smp_task_timedout(unsigned long _task)