author		Dan Williams <dan.j.williams@intel.com>	2011-06-13 20:39:44 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2011-07-03 07:04:51 -0400
commit		209fae14fabfd48525e5630bebbbd4ca15090c60 (patch)
tree		b251b9b394b3493cc15242ea31002abcb4e9bb59 /drivers/scsi/isci
parent		360b03ed178a4fe3971b0a098d8feeb53333481b (diff)
isci: atomic device lookup and reference counting
We have unsafe references to remote devices that are notified to disappear at lldd_dev_gone.  In order to clean this up we need a single canonical source for device lookups and stable references once a lookup succeeds.

Towards that end guarantee that domain_device.lldd_dev is NULL as soon as we start the process of stopping a device.  Any code path that wants to safely lookup a remote device must do so through task->dev->lldd_dev (isci_lookup_device()).

For in-flight references outside of scic_lock we need reference counting to ensure that the device is not recycled before we are done with it.  Simplify device back references to just scic_sds_request.target_device which is now the only permissible internal reference that is maintained relative to the reference count.

There were two occasions where we wanted new i/o's to be treated as SAS_TASK_UNDELIVERED but where the domain_dev->lldd_dev link is still intact.  Introduce a 'gone' flag to prevent i/o while waiting for libsas to take action on the port down event.

One 'core' leftover is that we currently call scic_remote_device_destruct() from isci_remote_device_deconstruct() which is called when the 'core' says the device is stopped.  It would be more natural for the final put to trigger isci_remote_device_deconstruct(), but this implementation is deferred as it requires other changes.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
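In short, every consumer of a remote device outside of scic_lock now follows a lookup/use/put discipline.  The sketch below shows that calling convention using the isci_lookup_device(), isci_put_device() and isci_request_execute() helpers introduced in this patch; the wrapper function example_submit_io() and its error handling are illustrative only and are not part of the patch:

	/* Illustrative only: how a submission path is expected to use the new
	 * helpers.  isci_lookup_device() must be called under ihost->scic_lock
	 * and takes a kref on success; isci_put_device() drops it, and the
	 * final put runs isci_remote_device_release().
	 */
	static int example_submit_io(struct isci_host *ihost, struct sas_task *task,
				     gfp_t gfp_flags)
	{
		struct isci_remote_device *idev;
		unsigned long flags;
		int ret;

		spin_lock_irqsave(&ihost->scic_lock, flags);
		idev = isci_lookup_device(task->dev);	/* NULL once IDEV_GONE is set */
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		if (!idev)
			return -ENODEV;	/* treat the i/o as SAS_TASK_UNDELIVERED */

		ret = isci_request_execute(ihost, idev, task, gfp_flags);

		isci_put_device(idev);	/* drop the in-flight reference */
		return ret;
	}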
Diffstat (limited to 'drivers/scsi/isci')
-rw-r--r--	drivers/scsi/isci/host.c		|   4
-rw-r--r--	drivers/scsi/isci/port.c		|   3
-rw-r--r--	drivers/scsi/isci/remote_device.c	|  80
-rw-r--r--	drivers/scsi/isci/remote_device.h	|  23
-rw-r--r--	drivers/scsi/isci/request.c		|  60
-rw-r--r--	drivers/scsi/isci/request.h		|   7
-rw-r--r--	drivers/scsi/isci/sata.c		|   7
-rw-r--r--	drivers/scsi/isci/task.c		| 231
-rw-r--r--	drivers/scsi/isci/task.h		|   9
9 files changed, 187 insertions, 237 deletions
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index ae9edae1d245..40f35fad244b 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1327,8 +1327,8 @@ void isci_host_deinit(struct isci_host *ihost)
 		struct isci_remote_device *idev, *d;

 		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
-			isci_remote_device_change_state(idev, isci_stopping);
-			isci_remote_device_stop(ihost, idev);
+			if (test_bit(IDEV_ALLOCATED, &idev->flags))
+				isci_remote_device_stop(ihost, idev);
 		}
 	}

diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index fb66e30da075..5f4a4e3954db 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -321,8 +321,7 @@ static void isci_port_link_down(struct isci_host *isci_host,
 			dev_dbg(&isci_host->pdev->dev,
 				"%s: isci_device = %p\n",
 				__func__, isci_device);
-			isci_remote_device_change_state(isci_device,
-							isci_stopping);
+			set_bit(IDEV_GONE, &isci_device->flags);
 		}
 	}
 	isci_port_change_state(isci_port, isci_stopping);
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 45592ad33c3b..ab5f9868e4ef 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -94,7 +94,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
94 "%s: isci_device = %p\n", __func__, idev); 94 "%s: isci_device = %p\n", __func__, idev);
95 95
96 if (reason == SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED) 96 if (reason == SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED)
97 isci_remote_device_change_state(idev, isci_stopping); 97 set_bit(IDEV_GONE, &idev->flags);
98 else 98 else
99 /* device ready is actually a "not ready for io" state. */ 99 /* device ready is actually a "not ready for io" state. */
100 isci_remote_device_change_state(idev, isci_ready); 100 isci_remote_device_change_state(idev, isci_ready);
@@ -449,8 +449,10 @@ static void scic_sds_remote_device_start_request(struct scic_sds_remote_device *
 	/* cleanup requests that failed after starting on the port */
 	if (status != SCI_SUCCESS)
 		scic_sds_port_complete_io(sci_port, sci_dev, sci_req);
-	else
+	else {
+		kref_get(&sci_dev_to_idev(sci_dev)->kref);
 		scic_sds_remote_device_increment_request_count(sci_dev);
+	}
 }

 enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic,
@@ -656,6 +658,8 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s
656 "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " 658 "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
657 "could not complete\n", __func__, sci_port, 659 "could not complete\n", __func__, sci_port,
658 sci_dev, sci_req, status); 660 sci_dev, sci_req, status);
661 else
662 isci_put_device(sci_dev_to_idev(sci_dev));
659 663
660 return status; 664 return status;
661} 665}
@@ -860,23 +864,11 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_
 	 * here should go through isci_remote_device_nuke_requests.
 	 * If we hit this condition, we will need a way to complete
 	 * io requests in process */
-	while (!list_empty(&idev->reqs_in_process)) {
-
-		dev_err(&ihost->pdev->dev,
-			"%s: ** request list not empty! **\n", __func__);
-		BUG();
-	}
+	BUG_ON(!list_empty(&idev->reqs_in_process));

 	scic_remote_device_destruct(&idev->sci);
-	idev->domain_dev->lldd_dev = NULL;
-	idev->domain_dev = NULL;
-	idev->isci_port = NULL;
 	list_del_init(&idev->node);
-
-	clear_bit(IDEV_START_PENDING, &idev->flags);
-	clear_bit(IDEV_STOP_PENDING, &idev->flags);
-	clear_bit(IDEV_EH, &idev->flags);
-	wake_up(&ihost->eventq);
+	isci_put_device(idev);
 }

 /**
@@ -1314,6 +1306,22 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
 	return idev;
 }

+void isci_remote_device_release(struct kref *kref)
+{
+	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
+	struct isci_host *ihost = idev->isci_port->isci_host;
+
+	idev->domain_dev = NULL;
+	idev->isci_port = NULL;
+	clear_bit(IDEV_START_PENDING, &idev->flags);
+	clear_bit(IDEV_STOP_PENDING, &idev->flags);
+	clear_bit(IDEV_GONE, &idev->flags);
+	clear_bit(IDEV_EH, &idev->flags);
+	smp_mb__before_clear_bit();
+	clear_bit(IDEV_ALLOCATED, &idev->flags);
+	wake_up(&ihost->eventq);
+}
+
 /**
  * isci_remote_device_stop() - This function is called internally to stop the
  *	remote device.
@@ -1330,7 +1338,11 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
 	dev_dbg(&ihost->pdev->dev,
 		"%s: isci_device = %p\n", __func__, idev);

+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
+	set_bit(IDEV_GONE, &idev->flags);
 	isci_remote_device_change_state(idev, isci_stopping);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);

 	/* Kill all outstanding requests. */
 	isci_remote_device_nuke_requests(ihost, idev);
@@ -1342,14 +1354,10 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);

 	/* Wait for the stop complete callback. */
-	if (status == SCI_SUCCESS) {
+	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
+		/* nothing to wait for */;
+	else
 		wait_for_device_stop(ihost, idev);
-		clear_bit(IDEV_ALLOCATED, &idev->flags);
-	}
-
-	dev_dbg(&ihost->pdev->dev,
-		"%s: idev = %p - after completion wait\n",
-		__func__, idev);

 	return status;
 }
@@ -1416,39 +1424,33 @@ int isci_remote_device_found(struct domain_device *domain_dev)
 	if (!isci_device)
 		return -ENODEV;

+	kref_init(&isci_device->kref);
 	INIT_LIST_HEAD(&isci_device->node);
-	domain_dev->lldd_dev = isci_device;
+
+	spin_lock_irq(&isci_host->scic_lock);
 	isci_device->domain_dev = domain_dev;
 	isci_device->isci_port = isci_port;
 	isci_remote_device_change_state(isci_device, isci_starting);
-
-
-	spin_lock_irq(&isci_host->scic_lock);
 	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);

 	set_bit(IDEV_START_PENDING, &isci_device->flags);
 	status = isci_remote_device_construct(isci_port, isci_device);
-	spin_unlock_irq(&isci_host->scic_lock);

 	dev_dbg(&isci_host->pdev->dev,
 		"%s: isci_device = %p\n",
 		__func__, isci_device);

-	if (status != SCI_SUCCESS) {
-
-		spin_lock_irq(&isci_host->scic_lock);
-		isci_remote_device_deconstruct(
-			isci_host,
-			isci_device
-		);
-		spin_unlock_irq(&isci_host->scic_lock);
-		return -ENODEV;
-	}
+	if (status == SCI_SUCCESS) {
+		/* device came up, advertise it to the world */
+		domain_dev->lldd_dev = isci_device;
+	} else
+		isci_put_device(isci_device);
+	spin_unlock_irq(&isci_host->scic_lock);

 	/* wait for the device ready callback. */
 	wait_for_device_start(isci_host, isci_device);

-	return 0;
+	return status == SCI_SUCCESS ? 0 : -ENODEV;
 }
 /**
  * isci_device_is_reset_pending() - This function will check if there is any
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 2b6a5bb7bd60..05842b5f1e3b 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -56,6 +56,7 @@
 #ifndef _ISCI_REMOTE_DEVICE_H_
 #define _ISCI_REMOTE_DEVICE_H_
 #include <scsi/libsas.h>
+#include <linux/kref.h>
 #include "scu_remote_node_context.h"
 #include "remote_node_context.h"
 #include "port.h"
@@ -134,7 +135,9 @@ struct isci_remote_device {
 	#define IDEV_STOP_PENDING 1
 	#define IDEV_ALLOCATED 2
 	#define IDEV_EH 3
+	#define IDEV_GONE 4
 	unsigned long flags;
+	struct kref kref;
 	struct isci_port *isci_port;
 	struct domain_device *domain_dev;
 	struct list_head node;
@@ -145,6 +148,26 @@ struct isci_remote_device {

 #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000

+/* device reference routines must be called under scic_lock */
+static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
+{
+	struct isci_remote_device *idev = dev->lldd_dev;
+
+	if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
+		kref_get(&idev->kref);
+		return idev;
+	}
+
+	return NULL;
+}
+
+void isci_remote_device_release(struct kref *kref);
+static inline void isci_put_device(struct isci_remote_device *idev)
+{
+	if (idev)
+		kref_put(&idev->kref, isci_remote_device_release);
+}
+
 enum sci_status isci_remote_device_stop(struct isci_host *ihost,
 					struct isci_remote_device *idev);
 void isci_remote_device_nuke_requests(struct isci_host *ihost,
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index f0813d076c50..fd6314abeb0b 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2313,7 +2313,7 @@ static void isci_request_set_open_reject_status(
  *    none.
  */
 static void isci_request_handle_controller_specific_errors(
-	struct isci_remote_device *isci_device,
+	struct isci_remote_device *idev,
 	struct isci_request *request,
 	struct sas_task *task,
 	enum service_response *response_ptr,
@@ -2353,8 +2353,7 @@ static void isci_request_handle_controller_specific_errors(
 			 * that we ignore the quiesce state, since we are
 			 * concerned about the actual device state.
 			 */
-			if ((isci_device->status == isci_stopping) ||
-			    (isci_device->status == isci_stopped))
+			if (!idev)
 				*status_ptr = SAS_DEVICE_UNKNOWN;
 			else
 				*status_ptr = SAS_ABORTED_TASK;
@@ -2367,8 +2366,7 @@ static void isci_request_handle_controller_specific_errors(
 		/* Task in the target is not done. */
 		*response_ptr = SAS_TASK_UNDELIVERED;

-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			*status_ptr = SAS_DEVICE_UNKNOWN;
 		else
 			*status_ptr = SAM_STAT_TASK_ABORTED;
@@ -2399,8 +2397,7 @@ static void isci_request_handle_controller_specific_errors(
 			 * that we ignore the quiesce state, since we are
 			 * concerned about the actual device state.
 			 */
-			if ((isci_device->status == isci_stopping) ||
-			    (isci_device->status == isci_stopped))
+			if (!idev)
 				*status_ptr = SAS_DEVICE_UNKNOWN;
 			else
 				*status_ptr = SAS_ABORTED_TASK;
@@ -2629,7 +2626,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 	struct ssp_response_iu *resp_iu;
 	void *resp_buf;
 	unsigned long task_flags;
-	struct isci_remote_device *isci_device = request->isci_device;
+	struct isci_remote_device *idev = isci_lookup_device(task->dev);
 	enum service_response response = SAS_TASK_UNDELIVERED;
 	enum exec_status status = SAS_ABORTED_TASK;
 	enum isci_request_status request_status;
@@ -2672,9 +2669,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		 * that we ignore the quiesce state, since we are
 		 * concerned about the actual device state.
 		 */
-		if ((isci_device->status == isci_stopping)
-		    || (isci_device->status == isci_stopped)
-		    )
+		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
@@ -2697,8 +2692,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		request->complete_in_target = true;
 		response = SAS_TASK_UNDELIVERED;

-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			/* The device has been /is being stopped. Note that
 			 * we ignore the quiesce state, since we are
 			 * concerned about the actual device state.
@@ -2728,8 +2722,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		 * that we ignore the quiesce state, since we are
 		 * concerned about the actual device state.
 		 */
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
@@ -2861,8 +2854,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		 * that we ignore the quiesce state, since we are
 		 * concerned about the actual device state.
 		 */
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
@@ -2873,7 +2865,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 	case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:

 		isci_request_handle_controller_specific_errors(
-			isci_device, request, task, &response, &status,
+			idev, request, task, &response, &status,
 			&complete_to_host);

 		break;
@@ -2902,8 +2894,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,

 		/* Fail the I/O so it can be retried. */
 		response = SAS_TASK_UNDELIVERED;
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
@@ -2926,8 +2917,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
 		 * that we ignore the quiesce state, since we are
 		 * concerned about the actual device state.
 		 */
-		if ((isci_device->status == isci_stopping) ||
-		    (isci_device->status == isci_stopped))
+		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
@@ -2953,8 +2943,10 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,

 	/* complete the io request to the core. */
 	scic_controller_complete_io(&isci_host->sci,
-				    &isci_device->sci,
+				    request->sci.target_device,
 				    &request->sci);
+	isci_put_device(idev);
+
 	/* set terminated handle so it cannot be completed or
 	 * terminated again, and to cause any calls into abort
 	 * task to recognize the already completed case.
@@ -3511,7 +3503,6 @@ static enum sci_status isci_io_request_build(
 }

 static struct isci_request *isci_request_alloc_core(struct isci_host *ihost,
-						    struct isci_remote_device *idev,
 						    gfp_t gfp_flags)
 {
 	dma_addr_t handle;
@@ -3528,7 +3519,6 @@ static struct isci_request *isci_request_alloc_core(struct isci_host *ihost,
 	spin_lock_init(&ireq->state_lock);
 	ireq->request_daddr = handle;
 	ireq->isci_host = ihost;
-	ireq->isci_device = idev;
 	ireq->io_request_completion = NULL;
 	ireq->terminated = false;

@@ -3546,12 +3536,11 @@ static struct isci_request *isci_request_alloc_core(struct isci_host *ihost,

 static struct isci_request *isci_request_alloc_io(struct isci_host *ihost,
 						  struct sas_task *task,
-						  struct isci_remote_device *idev,
 						  gfp_t gfp_flags)
 {
 	struct isci_request *ireq;

-	ireq = isci_request_alloc_core(ihost, idev, gfp_flags);
+	ireq = isci_request_alloc_core(ihost, gfp_flags);
 	if (ireq) {
 		ireq->ttype_ptr.io_task_ptr = task;
 		ireq->ttype = io_task;
@@ -3562,12 +3551,11 @@ static struct isci_request *isci_request_alloc_io(struct isci_host *ihost,

 struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
 					    struct isci_tmf *isci_tmf,
-					    struct isci_remote_device *idev,
 					    gfp_t gfp_flags)
 {
 	struct isci_request *ireq;

-	ireq = isci_request_alloc_core(ihost, idev, gfp_flags);
+	ireq = isci_request_alloc_core(ihost, gfp_flags);
 	if (ireq) {
 		ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
 		ireq->ttype = tmf_task;
@@ -3575,21 +3563,16 @@ struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
 	return ireq;
 }

-int isci_request_execute(struct isci_host *ihost, struct sas_task *task,
-			 gfp_t gfp_flags)
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+			 struct sas_task *task, gfp_t gfp_flags)
 {
 	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
-	struct scic_sds_remote_device *sci_dev;
-	struct isci_remote_device *idev;
 	struct isci_request *ireq;
 	unsigned long flags;
 	int ret = 0;

-	idev = task->dev->lldd_dev;
-	sci_dev = &idev->sci;
-
 	/* do common allocation and init of request object. */
-	ireq = isci_request_alloc_io(ihost, task, idev, gfp_flags);
+	ireq = isci_request_alloc_io(ihost, task, gfp_flags);
 	if (!ireq)
 		goto out;

@@ -3605,8 +3588,7 @@ int isci_request_execute(struct isci_host *ihost, struct sas_task *task,
 	spin_lock_irqsave(&ihost->scic_lock, flags);

 	/* send the request, let the core assign the IO TAG. */
-	status = scic_controller_start_io(&ihost->sci, sci_dev,
-					  &ireq->sci,
+	status = scic_controller_start_io(&ihost->sci, &idev->sci, &ireq->sci,
 					  SCI_CONTROLLER_INVALID_IO_TAG);
 	if (status != SCI_SUCCESS &&
 	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 8de2542f081f..9bb7c36257f3 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -285,7 +285,6 @@ struct isci_request {
 		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
 	} ttype_ptr;
 	struct isci_host *isci_host;
-	struct isci_remote_device *isci_device;
 	/* For use in the requests_to_{complete|abort} lists: */
 	struct list_head completed_node;
 	/* For use in the reqs_in_process list: */
@@ -681,12 +680,10 @@ static inline void isci_request_free(struct isci_host *isci_host,

 struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
 					    struct isci_tmf *isci_tmf,
-					    struct isci_remote_device *idev,
 					    gfp_t gfp_flags);

-int isci_request_execute(struct isci_host *isci_host,
-			 struct sas_task *task,
-			 gfp_t gfp_flags);
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+			 struct sas_task *task, gfp_t gfp_flags);

 /**
  * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
diff --git a/drivers/scsi/isci/sata.c b/drivers/scsi/isci/sata.c
index b9b9271d4736..e7ce46924465 100644
--- a/drivers/scsi/isci/sata.c
+++ b/drivers/scsi/isci/sata.c
@@ -213,11 +213,10 @@ int isci_task_send_lu_reset_sata(

 	/* Send the soft reset to the target */
 	#define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
-	isci_task_build_tmf(&tmf, isci_device, isci_tmf_sata_srst_high,
-			    NULL, NULL
-			    );
+	isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);

-	ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_SRST_TIMEOUT_MS);
+	ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+				    ISCI_SRST_TIMEOUT_MS);

 	if (ret != TMF_RESP_FUNC_COMPLETE) {
 		dev_warn(&isci_host->pdev->dev,
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index ded81cd1a781..dd5e9de1ffb5 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -146,7 +146,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 {
 	struct isci_host *ihost = dev_to_ihost(task->dev);
-	struct isci_remote_device *device;
+	struct isci_remote_device *idev;
 	unsigned long flags;
 	int ret;
 	enum sci_status status;
@@ -166,11 +166,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 	dev_dbg(&ihost->pdev->dev,
 		"task = %p, num = %d; dev = %p; cmd = %p\n",
 		task, num, task->dev, task->uldd_task);
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_lookup_device(task->dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);

-	device = task->dev->lldd_dev;
-
-	if (device)
-		device_status = device->status;
+	if (idev)
+		device_status = idev->status;
 	else
 		device_status = isci_freed;

@@ -188,7 +189,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 			__func__,
 			task,
 			isci_host_get_state(ihost),
-			device,
+			idev,
 			device_status);

 	if (device_status == isci_ready) {
@@ -225,7 +226,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 			spin_unlock_irqrestore(&task->task_state_lock, flags);

 			/* build and send the request. */
-			status = isci_request_execute(ihost, task, gfp_flags);
+			status = isci_request_execute(ihost, idev, task, gfp_flags);

 			if (status != SCI_SUCCESS) {

@@ -248,33 +249,31 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 				}
 			}
 		}
+		isci_put_device(idev);
 	}
 	return 0;
 }

 static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+						     struct isci_remote_device *idev,
 						     struct isci_tmf *isci_tmf)
 {
-	struct scic_sds_remote_device *sci_dev;
 	enum sci_status status = SCI_FAILURE;
 	struct isci_request *ireq = NULL;
-	struct isci_remote_device *idev;
 	struct domain_device *dev;

 	dev_dbg(&ihost->pdev->dev,
 		"%s: isci_tmf = %p\n", __func__, isci_tmf);

-	idev = isci_tmf->device;
-	sci_dev = &idev->sci;
 	dev = idev->domain_dev;

 	/* do common allocation and init of request object. */
-	ireq = isci_request_alloc_tmf(ihost, isci_tmf, idev, GFP_ATOMIC);
+	ireq = isci_request_alloc_tmf(ihost, isci_tmf, GFP_ATOMIC);
 	if (!ireq)
 		return NULL;

 	/* let the core do it's construct. */
-	status = scic_task_request_construct(&ihost->sci, sci_dev,
+	status = scic_task_request_construct(&ihost->sci, &idev->sci,
 					     SCI_CONTROLLER_INVALID_IO_TAG,
 					     &ireq->sci);

@@ -309,25 +308,13 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
 	return ireq;
 }

-/**
- * isci_task_execute_tmf() - This function builds and sends a task request,
- *    then waits for the completion.
- * @isci_host: This parameter specifies the ISCI host object
- * @tmf: This parameter is the pointer to the task management structure for
- *    this request.
- * @timeout_ms: This parameter specifies the timeout period for the task
- *    management request.
- *
- * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes
- * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED.
- */
-int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf,
-			  unsigned long timeout_ms)
+int isci_task_execute_tmf(struct isci_host *ihost,
+			  struct isci_remote_device *isci_device,
+			  struct isci_tmf *tmf, unsigned long timeout_ms)
 {
 	DECLARE_COMPLETION_ONSTACK(completion);
 	enum sci_task_status status = SCI_TASK_FAILURE;
 	struct scic_sds_remote_device *sci_device;
-	struct isci_remote_device *isci_device = tmf->device;
 	struct isci_request *ireq;
 	int ret = TMF_RESP_FUNC_FAILED;
 	unsigned long flags;
@@ -352,7 +339,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf,
 	/* Assign the pointer to the TMF's completion kernel wait structure. */
 	tmf->complete = &completion;

-	ireq = isci_task_request_build(ihost, tmf);
+	ireq = isci_task_request_build(ihost, isci_device, tmf);
 	if (!ireq) {
 		dev_warn(&ihost->pdev->dev,
 			"%s: isci_task_request_build failed\n",
@@ -399,10 +386,9 @@ int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf,
 		if (tmf->cb_state_func != NULL)
 			tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);

-		status = scic_controller_terminate_request(
-				 &ireq->isci_host->sci,
-				 &ireq->isci_device->sci,
-				 &ireq->sci);
+		status = scic_controller_terminate_request(&ihost->sci,
+							   &isci_device->sci,
+							   &ireq->sci);

 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 	}
@@ -437,65 +423,32 @@ int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf,

 void isci_task_build_tmf(
 	struct isci_tmf *tmf,
-	struct isci_remote_device *isci_device,
 	enum isci_tmf_function_codes code,
 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
 			    struct isci_tmf *,
 			    void *),
 	void *cb_data)
 {
-	dev_dbg(&isci_device->isci_port->isci_host->pdev->dev,
-		"%s: isci_device = %p\n", __func__, isci_device);
-
 	memset(tmf, 0, sizeof(*tmf));

-	tmf->device = isci_device;
 	tmf->tmf_code = code;
-
 	tmf->cb_state_func = tmf_sent_cb;
 	tmf->cb_data = cb_data;
 }

 static void isci_task_build_abort_task_tmf(
 	struct isci_tmf *tmf,
-	struct isci_remote_device *isci_device,
 	enum isci_tmf_function_codes code,
 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
 			    struct isci_tmf *,
 			    void *),
 	struct isci_request *old_request)
 {
-	isci_task_build_tmf(tmf, isci_device, code, tmf_sent_cb,
+	isci_task_build_tmf(tmf, code, tmf_sent_cb,
 			    (void *)old_request);
 	tmf->io_tag = old_request->io_tag;
 }

-static struct isci_request *isci_task_get_request_from_task(
-	struct sas_task *task,
-	struct isci_remote_device **isci_device)
-{
-
-	struct isci_request *request = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&task->task_state_lock, flags);
-
-	request = task->lldd_task;
-
-	/* If task is already done, the request isn't valid */
-	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
-	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
-	    (request != NULL)) {
-
-		if (isci_device != NULL)
-			*isci_device = request->isci_device;
-	}
-
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	return request;
-}
-
 /**
  * isci_task_validate_request_to_abort() - This function checks the given I/O
  *    against the "started" state. If the request is still "started", it's
@@ -858,11 +811,10 @@ static int isci_task_send_lu_reset_sas(
 	 * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
 	 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
 	 */
-	isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_lun_reset, NULL,
-			    NULL);
+	isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);

 	#define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
-	ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+	ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);

 	if (ret == TMF_RESP_FUNC_COMPLETE)
 		dev_dbg(&isci_host->pdev->dev,
@@ -888,33 +840,33 @@ static int isci_task_send_lu_reset_sas(
 int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
 {
 	struct isci_host *isci_host = dev_to_ihost(domain_device);
-	struct isci_remote_device *isci_device = NULL;
+	struct isci_remote_device *isci_device;
+	unsigned long flags;
 	int ret;
-	bool device_stopping = false;

-	isci_device = domain_device->lldd_dev;
+	spin_lock_irqsave(&isci_host->scic_lock, flags);
+	isci_device = isci_lookup_device(domain_device);
+	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

 	dev_dbg(&isci_host->pdev->dev,
 		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
 		__func__, domain_device, isci_host, isci_device);

-	if (isci_device != NULL) {
-		device_stopping = (isci_device->status == isci_stopping)
-				  || (isci_device->status == isci_stopped);
+	if (isci_device)
 		set_bit(IDEV_EH, &isci_device->flags);
-	}

 	/* If there is a device reset pending on any request in the
 	 * device's list, fail this LUN reset request in order to
 	 * escalate to the device reset.
 	 */
-	if (!isci_device || device_stopping ||
+	if (!isci_device ||
 	    isci_device_is_reset_pending(isci_host, isci_device)) {
 		dev_warn(&isci_host->pdev->dev,
 			 "%s: No dev (%p), or "
 			 "RESET PENDING: domain_device=%p\n",
 			 __func__, isci_device, domain_device);
-		return TMF_RESP_FUNC_FAILED;
+		ret = TMF_RESP_FUNC_FAILED;
+		goto out;
 	}

 	/* Send the task management part of the reset. */
@@ -929,6 +881,8 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
 		isci_terminate_pending_requests(isci_host,
 						isci_device);

+ out:
+	isci_put_device(isci_device);
 	return ret;
 }

@@ -1023,60 +977,54 @@ int isci_task_abort_task(struct sas_task *task)
 	int ret = TMF_RESP_FUNC_FAILED;
 	unsigned long flags;
 	bool any_dev_reset = false;
-	bool device_stopping;

 	/* Get the isci_request reference from the task.  Note that
 	 * this check does not depend on the pending request list
 	 * in the device, because tasks driving resets may land here
 	 * after completion in the core.
 	 */
-	old_request = isci_task_get_request_from_task(task, &isci_device);
+	spin_lock_irqsave(&isci_host->scic_lock, flags);
+	spin_lock(&task->task_state_lock);
+
+	old_request = task->lldd_task;
+
+	/* If task is already done, the request isn't valid */
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+	    old_request)
+		isci_device = isci_lookup_device(task->dev);
+
+	spin_unlock(&task->task_state_lock);
+	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

 	dev_dbg(&isci_host->pdev->dev,
 		"%s: task = %p\n", __func__, task);

-	/* Check if the device has been / is currently being removed.
-	 * If so, no task management will be done, and the I/O will
-	 * be terminated.
-	 */
-	device_stopping = (isci_device->status == isci_stopping)
-			  || (isci_device->status == isci_stopped);
+	if (!isci_device || !old_request)
+		goto out;

-	/* XXX need to fix device lookup lifetime (needs to be done
-	 * under scic_lock, among other things...), but for now assume
-	 * the device is available like the above code
-	 */
 	set_bit(IDEV_EH, &isci_device->flags);

 	/* This version of the driver will fail abort requests for
 	 * SATA/STP.  Failing the abort request this way will cause the
 	 * SCSI error handler thread to escalate to LUN reset
 	 */
-	if (sas_protocol_ata(task->task_proto) && !device_stopping) {
+	if (sas_protocol_ata(task->task_proto)) {
 		dev_warn(&isci_host->pdev->dev,
 			    " task %p is for a STP/SATA device;"
 			    " returning TMF_RESP_FUNC_FAILED\n"
 			    " to cause a LUN reset...\n", task);
-		return TMF_RESP_FUNC_FAILED;
+		goto out;
 	}

 	dev_dbg(&isci_host->pdev->dev,
 		"%s: old_request == %p\n", __func__, old_request);

-	if (!device_stopping)
-		any_dev_reset = isci_device_is_reset_pending(isci_host,isci_device);
+	any_dev_reset = isci_device_is_reset_pending(isci_host,isci_device);

 	spin_lock_irqsave(&task->task_state_lock, flags);

-	/* Don't do resets to stopping devices. */
-	if (device_stopping) {
-
-		task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
-		any_dev_reset = false;
-
-	} else	/* See if there is a pending device reset for this device. */
-		any_dev_reset = any_dev_reset
-			|| (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+	any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);

 	/* If the extraction of the request reference from the task
 	 * failed, then the request has been completed (or if there is a
@@ -1130,8 +1078,7 @@ int isci_task_abort_task(struct sas_task *task)
1130 "%s: abort task not needed for %p\n", 1078 "%s: abort task not needed for %p\n",
1131 __func__, task); 1079 __func__, task);
1132 } 1080 }
1133 1081 goto out;
1134 return ret;
1135 } 1082 }
1136 else 1083 else
1137 spin_unlock_irqrestore(&task->task_state_lock, flags); 1084 spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1158,11 +1105,10 @@ int isci_task_abort_task(struct sas_task *task)
1158 "%s: device = %p; old_request %p already being aborted\n", 1105 "%s: device = %p; old_request %p already being aborted\n",
1159 __func__, 1106 __func__,
1160 isci_device, old_request); 1107 isci_device, old_request);
1161 1108 ret = TMF_RESP_FUNC_COMPLETE;
1162 return TMF_RESP_FUNC_COMPLETE; 1109 goto out;
1163 } 1110 }
1164 if ((task->task_proto == SAS_PROTOCOL_SMP) 1111 if ((task->task_proto == SAS_PROTOCOL_SMP)
1165 || device_stopping
1166 || old_request->complete_in_target 1112 || old_request->complete_in_target
1167 ) { 1113 ) {
1168 1114
@@ -1170,10 +1116,9 @@ int isci_task_abort_task(struct sas_task *task)

 		dev_dbg(&isci_host->pdev->dev,
 			"%s: SMP request (%d)"
-			" or device is stopping (%d)"
 			" or complete_in_target (%d), thus no TMF\n",
 			__func__, (task->task_proto == SAS_PROTOCOL_SMP),
-			device_stopping, old_request->complete_in_target);
+			old_request->complete_in_target);

 		/* Set the state on the task. */
 		isci_task_all_done(task);
@@ -1185,15 +1130,14 @@ int isci_task_abort_task(struct sas_task *task)
 		 */
 	} else {
 		/* Fill in the tmf stucture */
-		isci_task_build_abort_task_tmf(&tmf, isci_device,
-					       isci_tmf_ssp_task_abort,
+		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
 					       isci_abort_task_process_cb,
 					       old_request);

 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);

 		#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
-		ret = isci_task_execute_tmf(isci_host, &tmf,
+		ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
 					    ISCI_ABORT_TASK_TIMEOUT_MS);

 		if (ret != TMF_RESP_FUNC_COMPLETE)
@@ -1212,6 +1156,8 @@ int isci_task_abort_task(struct sas_task *task)

 	/* Make sure we do not leave a reference to aborted_io_completion */
 	old_request->io_request_completion = NULL;
+ out:
+	isci_put_device(isci_device);
 	return ret;
 }

@@ -1305,7 +1251,6 @@ isci_task_request_complete(struct isci_host *ihost,
 				     struct isci_request *ireq,
 				     enum sci_task_status completion_status)
 {
-	struct isci_remote_device *idev = ireq->isci_device;
 	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 	struct completion *tmf_complete;
 	struct scic_sds_request *sci_req = &ireq->sci;
@@ -1332,7 +1277,7 @@ isci_task_request_complete(struct isci_host *ihost,
 	/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
 	tmf_complete = tmf->complete;

-	scic_controller_complete_io(&ihost->sci, &idev->sci, &ireq->sci);
+	scic_controller_complete_io(&ihost->sci, ireq->sci.target_device, &ireq->sci);
 	/* set the 'terminated' flag handle to make sure it cannot be terminated
 	 * or completed again.
 	 */
@@ -1583,11 +1528,10 @@ static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy
 	dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__);
 }

-static int isci_reset_device(struct domain_device *dev, int hard_reset)
+static int isci_reset_device(struct isci_host *ihost,
+			     struct isci_remote_device *idev, int hard_reset)
 {
-	struct isci_remote_device *idev = dev->lldd_dev;
-	struct sas_phy *phy = sas_find_local_phy(dev);
-	struct isci_host *ihost = dev_to_ihost(dev);
+	struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
 	struct isci_port *iport = idev->isci_port;
 	enum sci_status status;
 	unsigned long flags;
@@ -1595,14 +1539,6 @@ static int isci_reset_device(struct domain_device *dev, int hard_reset)

 	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);

-	if (!idev) {
-		dev_warn(&ihost->pdev->dev,
-			 "%s: idev is GONE!\n",
-			 __func__);
-
-		return TMF_RESP_FUNC_COMPLETE; /* Nothing to reset. */
-	}
-
 	spin_lock_irqsave(&ihost->scic_lock, flags);
 	status = scic_remote_device_reset(&idev->sci);
 	if (status != SCI_SUCCESS) {
@@ -1662,35 +1598,50 @@ static int isci_reset_device(struct domain_device *dev, int hard_reset)
 int isci_task_I_T_nexus_reset(struct domain_device *dev)
 {
 	struct isci_host *ihost = dev_to_ihost(dev);
-	int ret = TMF_RESP_FUNC_FAILED, hard_reset = 1;
 	struct isci_remote_device *idev;
+	int ret, hard_reset = 1;
 	unsigned long flags;

-	/* XXX mvsas is not protecting against ->lldd_dev_gone(), are we
-	 * being too paranoid, or is mvsas busted?!
-	 */
 	spin_lock_irqsave(&ihost->scic_lock, flags);
-	idev = dev->lldd_dev;
-	if (!idev || !test_bit(IDEV_EH, &idev->flags))
-		ret = TMF_RESP_FUNC_COMPLETE;
+	idev = isci_lookup_device(dev);
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);

-	if (ret == TMF_RESP_FUNC_COMPLETE)
-		return ret;
+	if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+		ret = TMF_RESP_FUNC_COMPLETE;
+		goto out;
+	}

 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
 		hard_reset = 0;

-	return isci_reset_device(dev, hard_reset);
+	ret = isci_reset_device(ihost, idev, hard_reset);
+ out:
+	isci_put_device(idev);
+	return ret;
 }

 int isci_bus_reset_handler(struct scsi_cmnd *cmd)
 {
 	struct domain_device *dev = sdev_to_domain_dev(cmd->device);
-	int hard_reset = 1;
+	struct isci_host *ihost = dev_to_ihost(dev);
+	struct isci_remote_device *idev;
+	int ret, hard_reset = 1;
+	unsigned long flags;

 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
 		hard_reset = 0;

-	return isci_reset_device(dev, hard_reset);
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_lookup_device(dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (!idev) {
+		ret = TMF_RESP_FUNC_COMPLETE;
+		goto out;
+	}
+
+	ret = isci_reset_device(ihost, idev, hard_reset);
+ out:
+	isci_put_device(idev);
+	return ret;
 }
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index d574a18956d7..42019de23805 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -213,18 +213,15 @@ int isci_bus_reset_handler(struct scsi_cmnd *cmd);

 void isci_task_build_tmf(
 	struct isci_tmf *tmf,
-	struct isci_remote_device *isci_device,
 	enum isci_tmf_function_codes code,
 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
 			    struct isci_tmf *,
 			    void *),
 	void *cb_data);

-
-int isci_task_execute_tmf(
-	struct isci_host *isci_host,
-	struct isci_tmf *tmf,
-	unsigned long timeout_ms);
+int isci_task_execute_tmf(struct isci_host *isci_host,
+			  struct isci_remote_device *idev,
+			  struct isci_tmf *tmf, unsigned long timeout_ms);

 /**
  * enum isci_completion_selection - This enum defines the possible actions to