aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/isci/task.c
diff options
context:
space:
mode:
authorJeff Skirvin <jeffrey.d.skirvin@intel.com>2011-03-08 21:22:07 -0500
committerDan Williams <dan.j.williams@intel.com>2011-07-03 06:55:30 -0400
commitf0846c68912545d70da16b2fbedded37ea4394d8 (patch)
tree76bb21f978793eaa161390307c8834b87cb644a9 /drivers/scsi/isci/task.c
parent1fad9e934a43407c1ba397b1b6b8882aa8a2cafd (diff)
isci: Cleaning up task execute path.
Made sure the device ready check accounts for all states. Moved the aborted task check into the loop of pulling task requests off of the submitted list.

Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com>
Signed-off-by: Jacek Danecki <Jacek.Danecki@intel.com>
[remove host and device starting state checks]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/scsi/isci/task.c')
-rw-r--r--drivers/scsi/isci/task.c141
1 files changed, 71 insertions, 70 deletions
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index e9bfc22d91d0..3dc9ef3f305b 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -83,21 +83,10 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
83 unsigned long flags; 83 unsigned long flags;
84 int ret; 84 int ret;
85 enum sci_status status; 85 enum sci_status status;
86 86 enum isci_status device_status;
87 87
88 dev_dbg(task->dev->port->ha->dev, "%s: num=%d\n", __func__, num); 88 dev_dbg(task->dev->port->ha->dev, "%s: num=%d\n", __func__, num);
89 89
90 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
91
92 isci_task_complete_for_upper_layer(
93 task,
94 SAS_TASK_UNDELIVERED,
95 SAM_STAT_TASK_ABORTED,
96 isci_perform_normal_io_completion
97 );
98
99 return 0; /* The I/O was accepted (and failed). */
100 }
101 if ((task->dev == NULL) || (task->dev->port == NULL)) { 90 if ((task->dev == NULL) || (task->dev->port == NULL)) {
102 91
103 /* Indicate SAS_TASK_UNDELIVERED, so that the scsi midlayer 92 /* Indicate SAS_TASK_UNDELIVERED, so that the scsi midlayer
@@ -143,93 +132,105 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
143 /* We don't have a valid host reference, so we 132 /* We don't have a valid host reference, so we
144 * can't control the host queueing condition. 133 * can't control the host queueing condition.
145 */ 134 */
146 continue; 135 goto next_task;
147 } 136 }
148 137
149 device = isci_dev_from_domain_dev(task->dev); 138 device = isci_dev_from_domain_dev(task->dev);
150 139
151 isci_host = isci_host_from_sas_ha(task->dev->port->ha); 140 isci_host = isci_host_from_sas_ha(task->dev->port->ha);
152 141
153 if (device && device->status == isci_ready) { 142 if (device)
143 device_status = device->status;
144 else
145 device_status = isci_freed;
146
147 /* From this point onward, any process that needs to guarantee
148 * that there is no kernel I/O being started will have to wait
149 * for the quiesce spinlock.
150 */
151
152 if (device_status != isci_ready_for_io) {
154 153
155 /* Forces a retry from scsi mid layer. */ 154 /* Forces a retry from scsi mid layer. */
156 dev_warn(task->dev->port->ha->dev, 155 dev_warn(task->dev->port->ha->dev,
157 "%s: task %p: isci_host->status = %d, " 156 "%s: task %p: isci_host->status = %d, "
158 "device = %p\n", 157 "device = %p; device_status = 0x%x\n\n",
159 __func__, 158 __func__,
160 task, 159 task,
161 isci_host_get_state(isci_host), 160 isci_host_get_state(isci_host),
162 device); 161 device, device_status);
163
164 if (device)
165 dev_dbg(task->dev->port->ha->dev,
166 "%s: device->status = 0x%x\n",
167 __func__, device->status);
168 162
169 /* Indicate QUEUE_FULL so that the scsi midlayer 163 if (device_status == isci_ready) {
170 * retries. 164 /* Indicate QUEUE_FULL so that the scsi midlayer
171 */ 165 * retries.
172 isci_task_complete_for_upper_layer( 166 */
173 task, 167 isci_task_complete_for_upper_layer(
174 SAS_TASK_COMPLETE, 168 task,
175 SAS_QUEUE_FULL, 169 SAS_TASK_COMPLETE,
176 isci_perform_normal_io_completion 170 SAS_QUEUE_FULL,
177 ); 171 isci_perform_normal_io_completion
172 );
173 } else {
174 /* Else, the device is going down. */
175 isci_task_complete_for_upper_layer(
176 task,
177 SAS_TASK_UNDELIVERED,
178 SAS_DEVICE_UNKNOWN,
179 isci_perform_normal_io_completion
180 );
181 }
178 isci_host_can_dequeue(isci_host, 1); 182 isci_host_can_dequeue(isci_host, 1);
179 } 183 } else {
180 /* the device is going down... */ 184 /* There is a device and it's ready for I/O. */
181 else if (!device || device->status != isci_ready_for_io) { 185 spin_lock_irqsave(&task->task_state_lock, flags);
182 186
183 dev_dbg(task->dev->port->ha->dev, 187 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
184 "%s: task %p: isci_host->status = %d, "
185 "device = %p\n",
186 __func__,
187 task,
188 isci_host_get_state(isci_host),
189 device);
190 188
191 if (device) 189 spin_unlock_irqrestore(&task->task_state_lock,
192 dev_dbg(task->dev->port->ha->dev, 190 flags);
193 "%s: device->status = 0x%x\n",
194 __func__, device->status);
195 191
196 /* Indicate SAS_TASK_UNDELIVERED, so that the scsi 192 isci_task_complete_for_upper_layer(
197 * midlayer removes the target. 193 task,
198 */ 194 SAS_TASK_UNDELIVERED,
199 isci_task_complete_for_upper_layer( 195 SAM_STAT_TASK_ABORTED,
200 task, 196 isci_perform_normal_io_completion
201 SAS_TASK_UNDELIVERED, 197 );
202 SAS_DEVICE_UNKNOWN,
203 isci_perform_normal_io_completion
204 );
205 isci_host_can_dequeue(isci_host, 1);
206 198
207 } else { 199 /* The I/O was aborted. */
208 /* build and send the request. */
209 status = isci_request_execute(isci_host, task, &request,
210 gfp_flags);
211 200
212 if (status == SCI_SUCCESS) { 201 } else {
213 spin_lock_irqsave(&task->task_state_lock, flags);
214 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 202 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
215 spin_unlock_irqrestore(&task->task_state_lock, flags); 203 spin_unlock_irqrestore(&task->task_state_lock, flags);
216 } else { 204
217 /* Indicate QUEUE_FULL so that the scsi 205 /* build and send the request. */
218 * midlayer retries. if the request 206 status = isci_request_execute(isci_host, task, &request,
219 * failed for remote device reasons, 207 gfp_flags);
220 * it gets returned as 208
221 * SAS_TASK_UNDELIVERED next time 209 if (status != SCI_SUCCESS) {
222 * through. 210
223 */ 211 spin_lock_irqsave(&task->task_state_lock, flags);
224 isci_task_complete_for_upper_layer( 212 /* Did not really start this command. */
213 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
214 spin_unlock_irqrestore(&task->task_state_lock, flags);
215
216 /* Indicate QUEUE_FULL so that the scsi
217 * midlayer retries. if the request
218 * failed for remote device reasons,
219 * it gets returned as
220 * SAS_TASK_UNDELIVERED next time
221 * through.
222 */
223 isci_task_complete_for_upper_layer(
225 task, 224 task,
226 SAS_TASK_COMPLETE, 225 SAS_TASK_COMPLETE,
227 SAS_QUEUE_FULL, 226 SAS_QUEUE_FULL,
228 isci_perform_normal_io_completion 227 isci_perform_normal_io_completion
229 ); 228 );
230 isci_host_can_dequeue(isci_host, 1); 229 isci_host_can_dequeue(isci_host, 1);
230 }
231 } 231 }
232 } 232 }
233next_task:
233 task = list_entry(task->list.next, struct sas_task, list); 234 task = list_entry(task->list.next, struct sas_task, list);
234 } while (--num > 0); 235 } while (--num > 0);
235 return 0; 236 return 0;