author	Dan Williams <dan.j.williams@intel.com>	2011-06-28 16:47:09 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2011-07-03 07:04:51 -0400
commit	312e0c2455c18716cf640d4336dcb1e9e5053818
tree	be2dbc9a3e5ba39783448f0029231ea43e6e0428
parent	9274f45ea551421cd3bf329de9dd8d1e6208285a
isci: unify can_queue tracking on the tci_pool, uplevel tag assignment
The tci_pool tracks our outstanding command slots, which are also the
'index' portion of our tags. Grabbing the tag early in ->lldd_execute_task
lets us drop the isci_host_can_queue() and ->was_tag_assigned_by_user
infrastructure. ->was_tag_assigned_by_user required the task context to be
duplicated in a request-local buffer. With the tci established early we can
build the task_context directly into its final location and skip a memcpy.

With the task context buffer at a known address at request construction we
have the opportunity/obligation to also fix sgl handling. This rework feels
like it belongs in another patch, but the sgl handling and task_context are
too intertwined.

1/ Fix the 'ab' pair embedded in the task context to point to the 'cd'
   pair in the task context (previously we were prematurely linking to
   the staging buffer).

2/ Fix the broken iteration of pio sgls that assumes all sgls are
   relative to the request, and does a dangerous-looking reverse lookup
   of physical address to virtual address.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
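For context on the tag scheme this patch builds on: a tag packs a task
context index (tci) together with a rolling sequence number, so a stale tag
can be told apart from a recycled slot, and "queue full" falls out naturally
as "no free tci". Below is a minimal userspace sketch of that idea, not the
driver's implementation; the field widths and all toy_* names are
illustrative assumptions, with only the ISCI_TAG_TCI()-style masking taken
from the patch:

/* Toy model of a tag pool where tag = (sequence << TCI_BITS) | tci.
 * Field widths and all toy_* names are illustrative assumptions; only
 * the "tag carries a tci index" masking (cf. ISCI_TAG_TCI()) comes
 * from the patch itself.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TCI_BITS	4			/* assumed width, small for demo */
#define MAX_TCI		(1 << TCI_BITS)
#define TAG_TCI(tag)	((tag) & (MAX_TCI - 1))	/* cf. ISCI_TAG_TCI(tag) */
#define TAG_SEQ(tag)	((tag) >> TCI_BITS)
#define INVALID_TAG	((uint16_t)~0U)		/* cf. SCI_CONTROLLER_INVALID_IO_TAG */

struct tag_pool {
	uint8_t seq[MAX_TCI];		/* per-slot generation counter */
	uint16_t free[MAX_TCI];		/* stack of free tci values */
	int top;			/* number of free entries */
};

static void pool_init(struct tag_pool *p)
{
	int tci;

	memset(p, 0, sizeof(*p));
	for (tci = MAX_TCI - 1; tci >= 0; tci--)
		p->free[p->top++] = (uint16_t)tci;
}

/* Allocate a tag up front, as ->lldd_execute_task now does. */
static uint16_t toy_alloc_tag(struct tag_pool *p)
{
	uint16_t tci;

	if (p->top == 0)		/* pool exhausted == queue full */
		return INVALID_TAG;
	tci = p->free[--p->top];
	return (uint16_t)(p->seq[tci] << TCI_BITS | tci);
}

/* Error path: the command never reached the device, so hand the slot
 * back without bumping the sequence.
 */
static void toy_tci_free(struct tag_pool *p, uint16_t tci)
{
	p->free[p->top++] = tci;
}

/* Completion path: bump the sequence so stale copies of this tag no
 * longer match the slot.
 */
static void toy_free_tag(struct tag_pool *p, uint16_t tag)
{
	uint16_t tci = TAG_TCI(tag);

	p->seq[tci]++;
	toy_tci_free(p, tci);
}

int main(void)
{
	struct tag_pool pool;
	uint16_t tag;

	pool_init(&pool);
	tag = toy_alloc_tag(&pool);
	printf("tag %#x -> tci %u, seq %u\n", (unsigned)tag,
	       (unsigned)TAG_TCI(tag), (unsigned)TAG_SEQ(tag));
	toy_free_tag(&pool, tag);	/* normal completion */
	return 0;
}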
Diffstat (limited to 'drivers/scsi/isci/task.c')
 drivers/scsi/isci/task.c | 80
 1 file changed, 43 insertions(+), 37 deletions(-)
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 157e9978183a..22f6fe171111 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -63,6 +63,7 @@
 #include "request.h"
 #include "sata.h"
 #include "task.h"
+#include "host.h"
 
 /**
  * isci_task_refuse() - complete the request to the upper layer driver in
@@ -156,25 +157,19 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 {
 	struct isci_host *ihost = dev_to_ihost(task->dev);
 	struct isci_remote_device *idev;
-	enum sci_status status;
 	unsigned long flags;
 	bool io_ready;
-	int ret;
+	u16 tag;
 
 	dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
 
-	/* Check if we have room for more tasks */
-	ret = isci_host_can_queue(ihost, num);
-
-	if (ret) {
-		dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__);
-		return ret;
-	}
-
 	for_each_sas_task(num, task) {
+		enum sci_status status = SCI_FAILURE;
+
 		spin_lock_irqsave(&ihost->scic_lock, flags);
 		idev = isci_lookup_device(task->dev);
 		io_ready = isci_device_io_ready(idev, task);
+		tag = isci_alloc_tag(ihost);
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
 		dev_dbg(&ihost->pdev->dev,
@@ -185,15 +180,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 		if (!idev) {
 			isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
 					 SAS_DEVICE_UNKNOWN);
-			isci_host_can_dequeue(ihost, 1);
-		} else if (!io_ready) {
-
+		} else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
 			/* Indicate QUEUE_FULL so that the scsi midlayer
 			 * retries.
 			 */
 			isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
 					 SAS_QUEUE_FULL);
-			isci_host_can_dequeue(ihost, 1);
 		} else {
 			/* There is a device and it's ready for I/O. */
 			spin_lock_irqsave(&task->task_state_lock, flags);
@@ -206,13 +198,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 				isci_task_refuse(ihost, task,
 						 SAS_TASK_UNDELIVERED,
 						 SAM_STAT_TASK_ABORTED);
-				isci_host_can_dequeue(ihost, 1);
 			} else {
 				task->task_state_flags |= SAS_TASK_AT_INITIATOR;
 				spin_unlock_irqrestore(&task->task_state_lock, flags);
 
 				/* build and send the request. */
-				status = isci_request_execute(ihost, idev, task, gfp_flags);
+				status = isci_request_execute(ihost, idev, task, tag, gfp_flags);
 
 				if (status != SCI_SUCCESS) {
 
@@ -231,10 +222,17 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 					isci_task_refuse(ihost, task,
 							 SAS_TASK_COMPLETE,
 							 SAS_QUEUE_FULL);
-					isci_host_can_dequeue(ihost, 1);
 				}
 			}
 		}
+		if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+			spin_lock_irqsave(&ihost->scic_lock, flags);
+			/* command never hit the device, so just free
+			 * the tci and skip the sequence increment
+			 */
+			isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+			spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		}
 		isci_put_device(idev);
 	}
 	return 0;
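Stripped of the SAS bookkeeping, the rewritten loop above follows one tag
discipline: allocate under the host lock, treat an invalid tag as
QUEUE_FULL so the midlayer retries, and, if the request fails before the
hardware ever sees it, return only the tci so the slot's sequence is
preserved. A sketch of that shape, reusing the toy pool from the earlier
example; try_hw_submit() is a hypothetical stand-in for
isci_request_execute(), not a driver API:

/* Continues the toy pool sketch above; try_hw_submit() is hypothetical. */
static int try_hw_submit(uint16_t tag)
{
	(void)tag;
	return -1;		/* pretend construction/start failed */
}

static int toy_submit_one(struct tag_pool *p)
{
	uint16_t tag = toy_alloc_tag(p);	/* under scic_lock in isci */

	if (tag == INVALID_TAG)
		return -1;	/* report QUEUE_FULL; midlayer will retry */

	if (try_hw_submit(tag) != 0) {
		/* command never hit the device: free the tci only,
		 * skipping the sequence increment
		 */
		toy_tci_free(p, TAG_TCI(tag));
		return -1;
	}
	return 0;		/* completion path calls toy_free_tag() later */
}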
@@ -242,7 +240,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 
 static struct isci_request *isci_task_request_build(struct isci_host *ihost,
 						    struct isci_remote_device *idev,
-						    struct isci_tmf *isci_tmf)
+						    u16 tag, struct isci_tmf *isci_tmf)
 {
 	enum sci_status status = SCI_FAILURE;
 	struct isci_request *ireq = NULL;
@@ -259,8 +257,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
 		return NULL;
 
 	/* let the core do it's construct. */
-	status = scic_task_request_construct(&ihost->sci, &idev->sci,
-					     SCI_CONTROLLER_INVALID_IO_TAG,
+	status = scic_task_request_construct(&ihost->sci, &idev->sci, tag,
 					     &ireq->sci);
 
 	if (status != SCI_SUCCESS) {
@@ -290,8 +287,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
 	return ireq;
  errout:
 	isci_request_free(ihost, ireq);
-	ireq = NULL;
-	return ireq;
+	return NULL;
 }
 
 int isci_task_execute_tmf(struct isci_host *ihost,
@@ -305,6 +301,14 @@ int isci_task_execute_tmf(struct isci_host *ihost,
 	int ret = TMF_RESP_FUNC_FAILED;
 	unsigned long flags;
 	unsigned long timeleft;
+	u16 tag;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	tag = isci_alloc_tag(ihost);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+		return ret;
 
 	/* sanity check, return TMF_RESP_FUNC_FAILED
 	 * if the device is not there and ready.
@@ -316,7 +320,7 @@ int isci_task_execute_tmf(struct isci_host *ihost,
316 "%s: isci_device = %p not ready (%#lx)\n", 320 "%s: isci_device = %p not ready (%#lx)\n",
317 __func__, 321 __func__,
318 isci_device, isci_device ? isci_device->flags : 0); 322 isci_device, isci_device ? isci_device->flags : 0);
319 return TMF_RESP_FUNC_FAILED; 323 goto err_tci;
320 } else 324 } else
321 dev_dbg(&ihost->pdev->dev, 325 dev_dbg(&ihost->pdev->dev,
322 "%s: isci_device = %p\n", 326 "%s: isci_device = %p\n",
@@ -327,22 +331,16 @@ int isci_task_execute_tmf(struct isci_host *ihost,
 	/* Assign the pointer to the TMF's completion kernel wait structure. */
 	tmf->complete = &completion;
 
-	ireq = isci_task_request_build(ihost, isci_device, tmf);
-	if (!ireq) {
-		dev_warn(&ihost->pdev->dev,
-			"%s: isci_task_request_build failed\n",
-			__func__);
-		return TMF_RESP_FUNC_FAILED;
-	}
+	ireq = isci_task_request_build(ihost, isci_device, tag, tmf);
+	if (!ireq)
+		goto err_tci;
 
 	spin_lock_irqsave(&ihost->scic_lock, flags);
 
 	/* start the TMF io. */
-	status = scic_controller_start_task(
-		&ihost->sci,
-		sci_device,
-		&ireq->sci,
-		SCI_CONTROLLER_INVALID_IO_TAG);
+	status = scic_controller_start_task(&ihost->sci,
+					    sci_device,
+					    &ireq->sci);
 
 	if (status != SCI_TASK_SUCCESS) {
 		dev_warn(&ihost->pdev->dev,
@@ -351,8 +349,7 @@ int isci_task_execute_tmf(struct isci_host *ihost,
 			 status,
 			 ireq);
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
-		isci_request_free(ihost, ireq);
-		return ret;
+		goto err_ireq;
 	}
 
 	if (tmf->cb_state_func != NULL)
@@ -403,6 +400,15 @@ int isci_task_execute_tmf(struct isci_host *ihost,
 		 ireq);
 
 	return ret;
+
+ err_ireq:
+	isci_request_free(ihost, ireq);
+ err_tci:
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	return ret;
 }
 
 void isci_task_build_tmf(
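The TMF path's new error handling is the standard kernel stacked-label
unwind: each label undoes exactly one earlier acquisition, and the goto
target encodes how far setup got, so err_ireq falls through into err_tci
just as freeing the request must be followed by freeing its tag slot. A
generic sketch of the shape; the step*/start_io names are placeholders,
not driver APIs:

#include <stdio.h>
#include <stdbool.h>

/* Placeholder acquire/release steps; not driver APIs. */
static bool step1_acquire(void) { return true; }	/* cf. isci_alloc_tag() */
static void step1_release(void) { }			/* cf. isci_tci_free() */
static bool step2_acquire(void) { return true; }	/* cf. isci_task_request_build() */
static void step2_release(void) { }			/* cf. isci_request_free() */
static bool start_io(void) { return false; }		/* cf. scic_controller_start_task() */

static int run(void)
{
	int ret = -1;

	if (!step1_acquire())
		return ret;
	if (!step2_acquire())
		goto err_step1;
	if (!start_io())
		goto err_step2;		/* deepest failure unwinds everything */
	return 0;

 err_step2:
	step2_release();		/* falls through, like err_ireq: */
 err_step1:
	step1_release();		/* like err_tci: */
	return ret;
}

int main(void)
{
	printf("run() = %d\n", run());
	return 0;
}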