author    Dan Williams <dan.j.williams@intel.com>  2011-06-16 14:26:12 -0400
committer Dan Williams <dan.j.williams@intel.com>  2011-07-03 07:04:51 -0400
commit    f2088267514b39af1a94409168101527769a911c (patch)
tree      9155ce7eb3edf40ecdabc690bff34622ecea1294 /drivers/scsi/isci
parent    209fae14fabfd48525e5630bebbbd4ca15090c60 (diff)
isci: kill isci_remote_device_change_state()
Now that "stopping/stopped" are one and the same and are signalled by a NULL device pointer, the rest of the device status infrastructure can be removed (->status and ->state_lock). The "not ready for i/o" state is replaced with a state flag and is evaluated under scic_lock, so that we don't see transients between taking the device reference and submitting the i/o.

This also fixes a potential leakage of can_queue slots in the rare case that SAS_TASK_ABORTED is set at submission.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
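The pattern below is an illustrative user-space sketch of the idea (hypothetical code, not part of the driver): instead of a per-device status enum guarded by its own spinlock, a single "ready for I/O" flag is sampled in the same critical section that looks up the device, so the submission path sees a consistent lookup-plus-readiness snapshot. Names such as lookup_device() and DEV_IO_READY are invented for the example; pthread_mutex stands in for scic_lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define DEV_IO_READY  (1UL << 0)   /* analogous to IDEV_IO_READY */
#define DEV_GONE      (1UL << 1)   /* analogous to IDEV_GONE */

struct device {
	unsigned long flags;           /* replaces enum status + state_lock */
};

struct host {
	pthread_mutex_t lock;          /* stands in for ihost->scic_lock */
	struct device *dev;            /* NULL once the device is stopped */
};

/* Look up the device and sample readiness in one critical section. */
static struct device *lookup_device(struct host *h, bool *io_ready)
{
	struct device *dev;

	pthread_mutex_lock(&h->lock);
	dev = h->dev;
	*io_ready = dev ? (dev->flags & DEV_IO_READY) : false;
	pthread_mutex_unlock(&h->lock);
	return dev;
}

static int submit_io(struct host *h)
{
	bool io_ready;
	struct device *dev = lookup_device(h, &io_ready);

	if (!dev)
		return -1;      /* device unknown: refuse outright */
	if (!io_ready)
		return 1;       /* not ready yet: ask the caller to retry */
	/* ... build and start the request here ... */
	return 0;
}

int main(void)
{
	struct device d = { .flags = DEV_IO_READY };
	struct host h = { .lock = PTHREAD_MUTEX_INITIALIZER, .dev = &d };

	printf("ready:     %d\n", submit_io(&h));  /* 0 */
	d.flags &= ~DEV_IO_READY;                  /* a real driver flips this under the lock */
	printf("not ready: %d\n", submit_io(&h));  /* 1 */
	h.dev = NULL;
	printf("stopped:   %d\n", submit_io(&h));  /* -1 */
	return 0;
}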
Diffstat (limited to 'drivers/scsi/isci')
-rw-r--r--  drivers/scsi/isci/host.c           |  1
-rw-r--r--  drivers/scsi/isci/remote_device.c  | 50
-rw-r--r--  drivers/scsi/isci/remote_device.h  |  5
-rw-r--r--  drivers/scsi/isci/task.c           | 68
4 files changed, 27 insertions, 97 deletions
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 40f35fad244b..b08455f0d350 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2504,7 +2504,6 @@ int isci_host_init(struct isci_host *isci_host)
 
 		INIT_LIST_HEAD(&idev->reqs_in_process);
 		INIT_LIST_HEAD(&idev->node);
-		spin_lock_init(&idev->state_lock);
 	}
 
 	return 0;
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index ab5f9868e4ef..c2e5c05be0cb 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -62,24 +62,6 @@
 #include "task.h"
 
 /**
- * isci_remote_device_change_state() - This function gets the status of the
- *    remote_device object.
- * @isci_device: This parameter points to the isci_remote_device object
- *
- * status of the object as a isci_status enum.
- */
-void isci_remote_device_change_state(
-	struct isci_remote_device *isci_device,
-	enum isci_status status)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&isci_device->state_lock, flags);
-	isci_device->status = status;
-	spin_unlock_irqrestore(&isci_device->state_lock, flags);
-}
-
-/**
  * isci_remote_device_not_ready() - This function is called by the scic when
  *    the remote device is not ready. We mark the isci device as ready (not
  *    "ready_for_io") and signal the waiting proccess.
@@ -96,8 +78,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
 	if (reason == SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED)
 		set_bit(IDEV_GONE, &idev->flags);
 	else
-		/* device ready is actually a "not ready for io" state. */
-		isci_remote_device_change_state(idev, isci_ready);
+		clear_bit(IDEV_IO_READY, &idev->flags);
 }
 
 /**
@@ -113,7 +94,7 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote
 	dev_dbg(&ihost->pdev->dev,
 		"%s: idev = %p\n", __func__, idev);
 
-	isci_remote_device_change_state(idev, isci_ready_for_io);
+	set_bit(IDEV_IO_READY, &idev->flags);
 	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
 		wake_up(&ihost->eventq);
 }
@@ -871,26 +852,6 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_
 	isci_put_device(idev);
 }
 
-/**
- * isci_remote_device_stop_complete() - This function is called by the scic
- *    when the remote device stop has completed. We mark the isci device as not
- *    ready and remove the isci remote device.
- * @ihost: This parameter specifies the isci host object.
- * @idev: This parameter specifies the remote device.
- * @status: This parameter specifies status of the completion.
- *
- */
-static void isci_remote_device_stop_complete(struct isci_host *ihost,
-					     struct isci_remote_device *idev)
-{
-	dev_dbg(&ihost->pdev->dev, "%s: complete idev = %p\n", __func__, idev);
-
-	isci_remote_device_change_state(idev, isci_stopped);
-
-	/* after stop, we can tear down resources. */
-	isci_remote_device_deconstruct(ihost, idev);
-}
-
 static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
 {
 	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
@@ -903,7 +864,7 @@ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_mac
 	 */
 	prev_state = sci_dev->sm.previous_state_id;
 	if (prev_state == SCI_DEV_STOPPING)
-		isci_remote_device_stop_complete(scic_to_ihost(scic), idev);
+		isci_remote_device_deconstruct(scic_to_ihost(scic), idev);
 
 	scic_sds_controller_remote_device_stopped(scic, sci_dev);
 }
@@ -1301,8 +1262,6 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
 	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
 		return NULL;
 
-	isci_remote_device_change_state(idev, isci_freed);
-
 	return idev;
 }
 
@@ -1315,6 +1274,7 @@ void isci_remote_device_release(struct kref *kref)
 	idev->isci_port = NULL;
 	clear_bit(IDEV_START_PENDING, &idev->flags);
 	clear_bit(IDEV_STOP_PENDING, &idev->flags);
+	clear_bit(IDEV_IO_READY, &idev->flags);
 	clear_bit(IDEV_GONE, &idev->flags);
 	clear_bit(IDEV_EH, &idev->flags);
 	smp_mb__before_clear_bit();
@@ -1341,7 +1301,6 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
 	spin_lock_irqsave(&ihost->scic_lock, flags);
 	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
 	set_bit(IDEV_GONE, &idev->flags);
-	isci_remote_device_change_state(idev, isci_stopping);
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
 	/* Kill all outstanding requests. */
@@ -1430,7 +1389,6 @@ int isci_remote_device_found(struct domain_device *domain_dev)
 	spin_lock_irq(&isci_host->scic_lock);
 	isci_device->domain_dev = domain_dev;
 	isci_device->isci_port = isci_port;
-	isci_remote_device_change_state(isci_device, isci_starting);
 	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
 
 	set_bit(IDEV_START_PENDING, &isci_device->flags);
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 05842b5f1e3b..33f011447312 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -130,19 +130,18 @@ struct scic_sds_remote_device {
 };
 
 struct isci_remote_device {
-	enum isci_status status;
 	#define IDEV_START_PENDING 0
 	#define IDEV_STOP_PENDING 1
 	#define IDEV_ALLOCATED 2
 	#define IDEV_EH 3
 	#define IDEV_GONE 4
+	#define IDEV_IO_READY 5
 	unsigned long flags;
 	struct kref kref;
 	struct isci_port *isci_port;
 	struct domain_device *domain_dev;
 	struct list_head node;
 	struct list_head reqs_in_process;
-	spinlock_t state_lock;
 	struct scic_sds_remote_device sci;
 };
 
@@ -178,8 +177,6 @@ bool isci_device_is_reset_pending(struct isci_host *ihost,
 			       struct isci_remote_device *idev);
 void isci_device_clear_reset_pending(struct isci_host *ihost,
 				     struct isci_remote_device *idev);
-void isci_remote_device_change_state(struct isci_remote_device *idev,
-				     enum isci_status status);
 /**
  * scic_remote_device_stop() - This method will stop both transmission and
  *    reception of link activity for the supplied remote device. This method
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index dd5e9de1ffb5..c313bc16c218 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -147,10 +147,10 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 {
 	struct isci_host *ihost = dev_to_ihost(task->dev);
 	struct isci_remote_device *idev;
+	enum sci_status status;
 	unsigned long flags;
+	bool io_ready;
 	int ret;
-	enum sci_status status;
-	enum isci_status device_status;
 
 	dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
 
@@ -163,64 +163,40 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 	}
 
 	for_each_sas_task(num, task) {
-		dev_dbg(&ihost->pdev->dev,
-			"task = %p, num = %d; dev = %p; cmd = %p\n",
-			task, num, task->dev, task->uldd_task);
 		spin_lock_irqsave(&ihost->scic_lock, flags);
 		idev = isci_lookup_device(task->dev);
+		io_ready = idev ? test_bit(IDEV_IO_READY, &idev->flags) : 0;
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-		if (idev)
-			device_status = idev->status;
-		else
-			device_status = isci_freed;
-
-		/* From this point onward, any process that needs to guarantee
-		 * that there is no kernel I/O being started will have to wait
-		 * for the quiesce spinlock.
-		 */
-
-		if (device_status != isci_ready_for_io) {
+		dev_dbg(&ihost->pdev->dev,
+			"task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
+			task, num, task->dev, idev, idev ? idev->flags : 0,
+			task->uldd_task);
 
-			/* Forces a retry from scsi mid layer. */
-			dev_dbg(&ihost->pdev->dev,
-				"%s: task %p: isci_host->status = %d, "
-				"device = %p; device_status = 0x%x\n\n",
-				__func__,
-				task,
-				isci_host_get_state(ihost),
-				idev,
-				device_status);
-
-			if (device_status == isci_ready) {
-				/* Indicate QUEUE_FULL so that the scsi midlayer
-				 * retries.
-				 */
-				isci_task_refuse(ihost, task,
-						 SAS_TASK_COMPLETE,
-						 SAS_QUEUE_FULL);
-			} else {
-				/* Else, the device is going down. */
-				isci_task_refuse(ihost, task,
-						 SAS_TASK_UNDELIVERED,
-						 SAS_DEVICE_UNKNOWN);
-			}
+		if (!idev) {
+			isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
+					 SAS_DEVICE_UNKNOWN);
+			isci_host_can_dequeue(ihost, 1);
+		} else if (!io_ready) {
+			/* Indicate QUEUE_FULL so that the scsi midlayer
+			 * retries.
+			 */
+			isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
+					 SAS_QUEUE_FULL);
 			isci_host_can_dequeue(ihost, 1);
 		} else {
 			/* There is a device and it's ready for I/O. */
 			spin_lock_irqsave(&task->task_state_lock, flags);
 
 			if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
-
+				/* The I/O was aborted. */
 				spin_unlock_irqrestore(&task->task_state_lock,
 						       flags);
 
 				isci_task_refuse(ihost, task,
 						 SAS_TASK_UNDELIVERED,
 						 SAM_STAT_TASK_ABORTED);
-
-				/* The I/O was aborted. */
-
+				isci_host_can_dequeue(ihost, 1);
 			} else {
 				task->task_state_flags |= SAS_TASK_AT_INITIATOR;
 				spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -323,11 +299,11 @@ int isci_task_execute_tmf(struct isci_host *ihost,
 	/* sanity check, return TMF_RESP_FUNC_FAILED
 	 * if the device is not there and ready.
 	 */
-	if (!isci_device || isci_device->status != isci_ready_for_io) {
+	if (!isci_device || !test_bit(IDEV_IO_READY, &isci_device->flags)) {
 		dev_dbg(&ihost->pdev->dev,
-			"%s: isci_device = %p not ready (%d)\n",
+			"%s: isci_device = %p not ready (%#lx)\n",
 			__func__,
-			isci_device, isci_device->status);
+			isci_device, isci_device ? isci_device->flags : 0);
 		return TMF_RESP_FUNC_FAILED;
 	} else
 		dev_dbg(&ihost->pdev->dev,