diff options
author | Dan Williams <dan.j.williams@intel.com> | 2011-06-28 16:47:09 -0400 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2011-07-03 07:04:51 -0400 |
commit | 312e0c2455c18716cf640d4336dcb1e9e5053818 (patch) | |
tree | be2dbc9a3e5ba39783448f0029231ea43e6e0428 /drivers/scsi/isci/request.c | |
parent | 9274f45ea551421cd3bf329de9dd8d1e6208285a (diff) |
isci: unify can_queue tracking on the tci_pool, uplevel tag assignment
The tci_pool tracks our outstanding command slots which are also the 'index'
portion of our tags. Grabbing the tag early in ->lldd_execute_task lets us
drop the isci_host_can_queue() and ->was_tag_assigned_by_user infrastructure.
->was_tag_assigned_by_user required the task context to be duplicated in
a request-local buffer. With the tci established early we can build the
task_context directly into its final location and skip a memcpy.
With the task context buffer at a known address at request construction we
have the opportunity/obligation to also fix sgl handling. This rework feels
like it belongs in another patch but the sgl handling and task_context are too
intertwined.
1/ fix the 'ab' pair embedded in the task context to point to the 'cd' pair in
the task context (previously we were prematurely linking to the staging
buffer).
2/ fix the broken iteration of pio sgls that assumes all sgls are relative to
the request, and does a dangerous looking reverse lookup of physical
address to virtual address.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/scsi/isci/request.c')
-rw-r--r-- | drivers/scsi/isci/request.c | 469 |
1 files changed, 163 insertions, 306 deletions
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 08a7340b33bf..55859d5331b1 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c | |||
@@ -61,42 +61,50 @@ | |||
61 | #include "scu_event_codes.h" | 61 | #include "scu_event_codes.h" |
62 | #include "sas.h" | 62 | #include "sas.h" |
63 | 63 | ||
64 | /** | 64 | static struct scu_sgl_element_pair *to_sgl_element_pair(struct scic_sds_request *sci_req, |
65 | * This method returns the sgl element pair for the specificed sgl_pair index. | 65 | int idx) |
66 | * @sci_req: This parameter specifies the IO request for which to retrieve | 66 | { |
67 | * the Scatter-Gather List element pair. | 67 | if (idx == 0) |
68 | * @sgl_pair_index: This parameter specifies the index into the SGL element | 68 | return &sci_req->tc->sgl_pair_ab; |
69 | * pair to be retrieved. | 69 | else if (idx == 1) |
70 | * | 70 | return &sci_req->tc->sgl_pair_cd; |
71 | * This method returns a pointer to an struct scu_sgl_element_pair. | 71 | else if (idx < 0) |
72 | */ | 72 | return NULL; |
73 | static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair( | 73 | else |
74 | struct scic_sds_request *sci_req, | 74 | return &sci_req->sg_table[idx - 2]; |
75 | u32 sgl_pair_index | 75 | } |
76 | ) { | ||
77 | struct scu_task_context *task_context; | ||
78 | 76 | ||
79 | task_context = (struct scu_task_context *)sci_req->task_context_buffer; | 77 | static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic, |
78 | struct scic_sds_request *sci_req, u32 idx) | ||
79 | { | ||
80 | u32 offset; | ||
80 | 81 | ||
81 | if (sgl_pair_index == 0) { | 82 | if (idx == 0) { |
82 | return &task_context->sgl_pair_ab; | 83 | offset = (void *) &sci_req->tc->sgl_pair_ab - |
83 | } else if (sgl_pair_index == 1) { | 84 | (void *) &scic->task_context_table[0]; |
84 | return &task_context->sgl_pair_cd; | 85 | return scic->task_context_dma + offset; |
86 | } else if (idx == 1) { | ||
87 | offset = (void *) &sci_req->tc->sgl_pair_cd - | ||
88 | (void *) &scic->task_context_table[0]; | ||
89 | return scic->task_context_dma + offset; | ||
85 | } | 90 | } |
86 | 91 | ||
87 | return &sci_req->sg_table[sgl_pair_index - 2]; | 92 | return scic_io_request_get_dma_addr(sci_req, &sci_req->sg_table[idx - 2]); |
93 | } | ||
94 | |||
95 | static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) | ||
96 | { | ||
97 | e->length = sg_dma_len(sg); | ||
98 | e->address_upper = upper_32_bits(sg_dma_address(sg)); | ||
99 | e->address_lower = lower_32_bits(sg_dma_address(sg)); | ||
100 | e->address_modifier = 0; | ||
88 | } | 101 | } |
89 | 102 | ||
90 | /** | ||
91 | * This function will build the SGL list for an IO request. | ||
92 | * @sci_req: This parameter specifies the IO request for which to build | ||
93 | * the Scatter-Gather List. | ||
94 | * | ||
95 | */ | ||
96 | static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) | 103 | static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) |
97 | { | 104 | { |
98 | struct isci_request *isci_request = sci_req_to_ireq(sds_request); | 105 | struct isci_request *isci_request = sci_req_to_ireq(sds_request); |
99 | struct isci_host *isci_host = isci_request->isci_host; | 106 | struct isci_host *isci_host = isci_request->isci_host; |
107 | struct scic_sds_controller *scic = &isci_host->sci; | ||
100 | struct sas_task *task = isci_request_access_task(isci_request); | 108 | struct sas_task *task = isci_request_access_task(isci_request); |
101 | struct scatterlist *sg = NULL; | 109 | struct scatterlist *sg = NULL; |
102 | dma_addr_t dma_addr; | 110 | dma_addr_t dma_addr; |
@@ -108,25 +116,19 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) | |||
108 | sg = task->scatter; | 116 | sg = task->scatter; |
109 | 117 | ||
110 | while (sg) { | 118 | while (sg) { |
111 | scu_sg = scic_sds_request_get_sgl_element_pair( | 119 | scu_sg = to_sgl_element_pair(sds_request, sg_idx); |
112 | sds_request, | 120 | init_sgl_element(&scu_sg->A, sg); |
113 | sg_idx); | ||
114 | |||
115 | SCU_SGL_COPY(scu_sg->A, sg); | ||
116 | |||
117 | sg = sg_next(sg); | 121 | sg = sg_next(sg); |
118 | |||
119 | if (sg) { | 122 | if (sg) { |
120 | SCU_SGL_COPY(scu_sg->B, sg); | 123 | init_sgl_element(&scu_sg->B, sg); |
121 | sg = sg_next(sg); | 124 | sg = sg_next(sg); |
122 | } else | 125 | } else |
123 | SCU_SGL_ZERO(scu_sg->B); | 126 | memset(&scu_sg->B, 0, sizeof(scu_sg->B)); |
124 | 127 | ||
125 | if (prev_sg) { | 128 | if (prev_sg) { |
126 | dma_addr = | 129 | dma_addr = to_sgl_element_pair_dma(scic, |
127 | scic_io_request_get_dma_addr( | 130 | sds_request, |
128 | sds_request, | 131 | sg_idx); |
129 | scu_sg); | ||
130 | 132 | ||
131 | prev_sg->next_pair_upper = | 133 | prev_sg->next_pair_upper = |
132 | upper_32_bits(dma_addr); | 134 | upper_32_bits(dma_addr); |
@@ -138,8 +140,7 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) | |||
138 | sg_idx++; | 140 | sg_idx++; |
139 | } | 141 | } |
140 | } else { /* handle when no sg */ | 142 | } else { /* handle when no sg */ |
141 | scu_sg = scic_sds_request_get_sgl_element_pair(sds_request, | 143 | scu_sg = to_sgl_element_pair(sds_request, sg_idx); |
142 | sg_idx); | ||
143 | 144 | ||
144 | dma_addr = dma_map_single(&isci_host->pdev->dev, | 145 | dma_addr = dma_map_single(&isci_host->pdev->dev, |
145 | task->scatter, | 146 | task->scatter, |
@@ -246,35 +247,12 @@ static void scu_ssp_reqeust_construct_task_context( | |||
246 | /* task_context->type.ssp.tag = sci_req->io_tag; */ | 247 | /* task_context->type.ssp.tag = sci_req->io_tag; */ |
247 | task_context->task_phase = 0x01; | 248 | task_context->task_phase = 0x01; |
248 | 249 | ||
249 | if (sds_request->was_tag_assigned_by_user) { | 250 | sds_request->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
250 | /* | 251 | (scic_sds_controller_get_protocol_engine_group(controller) << |
251 | * Build the task context now since we have already read | 252 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
252 | * the data | 253 | (scic_sds_port_get_index(target_port) << |
253 | */ | 254 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
254 | sds_request->post_context = | 255 | ISCI_TAG_TCI(sds_request->io_tag)); |
255 | (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | ||
256 | (scic_sds_controller_get_protocol_engine_group( | ||
257 | controller) << | ||
258 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | ||
259 | (scic_sds_port_get_index(target_port) << | ||
260 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | ||
261 | ISCI_TAG_TCI(sds_request->io_tag)); | ||
262 | } else { | ||
263 | /* | ||
264 | * Build the task context now since we have already read | ||
265 | * the data | ||
266 | * | ||
267 | * I/O tag index is not assigned because we have to wait | ||
268 | * until we get a TCi | ||
269 | */ | ||
270 | sds_request->post_context = | ||
271 | (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | ||
272 | (scic_sds_controller_get_protocol_engine_group( | ||
273 | owning_controller) << | ||
274 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | ||
275 | (scic_sds_port_get_index(target_port) << | ||
276 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); | ||
277 | } | ||
278 | 256 | ||
279 | /* | 257 | /* |
280 | * Copy the physical address for the command buffer to the | 258 | * Copy the physical address for the command buffer to the |
@@ -302,14 +280,11 @@ static void scu_ssp_reqeust_construct_task_context( | |||
302 | * @sci_req: | 280 | * @sci_req: |
303 | * | 281 | * |
304 | */ | 282 | */ |
305 | static void scu_ssp_io_request_construct_task_context( | 283 | static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *sci_req, |
306 | struct scic_sds_request *sci_req, | 284 | enum dma_data_direction dir, |
307 | enum dma_data_direction dir, | 285 | u32 len) |
308 | u32 len) | ||
309 | { | 286 | { |
310 | struct scu_task_context *task_context; | 287 | struct scu_task_context *task_context = sci_req->tc; |
311 | |||
312 | task_context = scic_sds_request_get_task_context(sci_req); | ||
313 | 288 | ||
314 | scu_ssp_reqeust_construct_task_context(sci_req, task_context); | 289 | scu_ssp_reqeust_construct_task_context(sci_req, task_context); |
315 | 290 | ||
@@ -347,12 +322,9 @@ static void scu_ssp_io_request_construct_task_context( | |||
347 | * constructed. | 322 | * constructed. |
348 | * | 323 | * |
349 | */ | 324 | */ |
350 | static void scu_ssp_task_request_construct_task_context( | 325 | static void scu_ssp_task_request_construct_task_context(struct scic_sds_request *sci_req) |
351 | struct scic_sds_request *sci_req) | ||
352 | { | 326 | { |
353 | struct scu_task_context *task_context; | 327 | struct scu_task_context *task_context = sci_req->tc; |
354 | |||
355 | task_context = scic_sds_request_get_task_context(sci_req); | ||
356 | 328 | ||
357 | scu_ssp_reqeust_construct_task_context(sci_req, task_context); | 329 | scu_ssp_reqeust_construct_task_context(sci_req, task_context); |
358 | 330 | ||
@@ -421,35 +393,12 @@ static void scu_sata_reqeust_construct_task_context( | |||
421 | /* Set the first word of the H2D REG FIS */ | 393 | /* Set the first word of the H2D REG FIS */ |
422 | task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; | 394 | task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; |
423 | 395 | ||
424 | if (sci_req->was_tag_assigned_by_user) { | 396 | sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
425 | /* | 397 | (scic_sds_controller_get_protocol_engine_group(controller) << |
426 | * Build the task context now since we have already read | 398 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
427 | * the data | 399 | (scic_sds_port_get_index(target_port) << |
428 | */ | 400 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
429 | sci_req->post_context = | 401 | ISCI_TAG_TCI(sci_req->io_tag)); |
430 | (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | ||
431 | (scic_sds_controller_get_protocol_engine_group( | ||
432 | controller) << | ||
433 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | ||
434 | (scic_sds_port_get_index(target_port) << | ||
435 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | ||
436 | ISCI_TAG_TCI(sci_req->io_tag)); | ||
437 | } else { | ||
438 | /* | ||
439 | * Build the task context now since we have already read | ||
440 | * the data. | ||
441 | * I/O tag index is not assigned because we have to wait | ||
442 | * until we get a TCi. | ||
443 | */ | ||
444 | sci_req->post_context = | ||
445 | (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | ||
446 | (scic_sds_controller_get_protocol_engine_group( | ||
447 | controller) << | ||
448 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | ||
449 | (scic_sds_port_get_index(target_port) << | ||
450 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); | ||
451 | } | ||
452 | |||
453 | /* | 402 | /* |
454 | * Copy the physical address for the command buffer to the SCU Task | 403 | * Copy the physical address for the command buffer to the SCU Task |
455 | * Context. We must offset the command buffer by 4 bytes because the | 404 | * Context. We must offset the command buffer by 4 bytes because the |
@@ -467,22 +416,9 @@ static void scu_sata_reqeust_construct_task_context( | |||
467 | task_context->response_iu_lower = 0; | 416 | task_context->response_iu_lower = 0; |
468 | } | 417 | } |
469 | 418 | ||
470 | 419 | static void scu_stp_raw_request_construct_task_context(struct scic_sds_request *sci_req) | |
471 | |||
472 | /** | ||
473 | * scu_stp_raw_request_construct_task_context - | ||
474 | * @sci_req: This parameter specifies the STP request object for which to | ||
475 | * construct a RAW command frame task context. | ||
476 | * @task_context: This parameter specifies the SCU specific task context buffer | ||
477 | * to construct. | ||
478 | * | ||
479 | * This method performs the operations common to all SATA/STP requests | ||
480 | * utilizing the raw frame method. none | ||
481 | */ | ||
482 | static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req, | ||
483 | struct scu_task_context *task_context) | ||
484 | { | 420 | { |
485 | struct scic_sds_request *sci_req = to_sci_req(stp_req); | 421 | struct scu_task_context *task_context = sci_req->tc; |
486 | 422 | ||
487 | scu_sata_reqeust_construct_task_context(sci_req, task_context); | 423 | scu_sata_reqeust_construct_task_context(sci_req, task_context); |
488 | 424 | ||
@@ -500,8 +436,7 @@ scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, | |||
500 | struct scic_sds_stp_request *stp_req = &sci_req->stp.req; | 436 | struct scic_sds_stp_request *stp_req = &sci_req->stp.req; |
501 | struct scic_sds_stp_pio_request *pio = &stp_req->type.pio; | 437 | struct scic_sds_stp_pio_request *pio = &stp_req->type.pio; |
502 | 438 | ||
503 | scu_stp_raw_request_construct_task_context(stp_req, | 439 | scu_stp_raw_request_construct_task_context(sci_req); |
504 | sci_req->task_context_buffer); | ||
505 | 440 | ||
506 | pio->current_transfer_bytes = 0; | 441 | pio->current_transfer_bytes = 0; |
507 | pio->ending_error = 0; | 442 | pio->ending_error = 0; |
@@ -512,13 +447,10 @@ scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, | |||
512 | 447 | ||
513 | if (copy_rx_frame) { | 448 | if (copy_rx_frame) { |
514 | scic_sds_request_build_sgl(sci_req); | 449 | scic_sds_request_build_sgl(sci_req); |
515 | /* Since the IO request copy of the TC contains the same data as | 450 | pio->request_current.sgl_index = 0; |
516 | * the actual TC this pointer is vaild for either. | ||
517 | */ | ||
518 | pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab; | ||
519 | } else { | 451 | } else { |
520 | /* The user does not want the data copied to the SGL buffer location */ | 452 | /* The user does not want the data copied to the SGL buffer location */ |
521 | pio->request_current.sgl_pair = NULL; | 453 | pio->request_current.sgl_index = -1; |
522 | } | 454 | } |
523 | 455 | ||
524 | return SCI_SUCCESS; | 456 | return SCI_SUCCESS; |
@@ -541,7 +473,7 @@ static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sc | |||
541 | u32 len, | 473 | u32 len, |
542 | enum dma_data_direction dir) | 474 | enum dma_data_direction dir) |
543 | { | 475 | { |
544 | struct scu_task_context *task_context = sci_req->task_context_buffer; | 476 | struct scu_task_context *task_context = sci_req->tc; |
545 | 477 | ||
546 | /* Build the STP task context structure */ | 478 | /* Build the STP task context structure */ |
547 | scu_sata_reqeust_construct_task_context(sci_req, task_context); | 479 | scu_sata_reqeust_construct_task_context(sci_req, task_context); |
@@ -587,8 +519,7 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, | |||
587 | 519 | ||
588 | if (tmf->tmf_code == isci_tmf_sata_srst_high || | 520 | if (tmf->tmf_code == isci_tmf_sata_srst_high || |
589 | tmf->tmf_code == isci_tmf_sata_srst_low) { | 521 | tmf->tmf_code == isci_tmf_sata_srst_low) { |
590 | scu_stp_raw_request_construct_task_context(&sci_req->stp.req, | 522 | scu_stp_raw_request_construct_task_context(sci_req); |
591 | sci_req->task_context_buffer); | ||
592 | return SCI_SUCCESS; | 523 | return SCI_SUCCESS; |
593 | } else { | 524 | } else { |
594 | dev_err(scic_to_dev(sci_req->owning_controller), | 525 | dev_err(scic_to_dev(sci_req->owning_controller), |
@@ -611,8 +542,7 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, | |||
611 | 542 | ||
612 | /* non data */ | 543 | /* non data */ |
613 | if (task->data_dir == DMA_NONE) { | 544 | if (task->data_dir == DMA_NONE) { |
614 | scu_stp_raw_request_construct_task_context(&sci_req->stp.req, | 545 | scu_stp_raw_request_construct_task_context(sci_req); |
615 | sci_req->task_context_buffer); | ||
616 | return SCI_SUCCESS; | 546 | return SCI_SUCCESS; |
617 | } | 547 | } |
618 | 548 | ||
@@ -701,8 +631,7 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re | |||
701 | 631 | ||
702 | if (tmf->tmf_code == isci_tmf_sata_srst_high || | 632 | if (tmf->tmf_code == isci_tmf_sata_srst_high || |
703 | tmf->tmf_code == isci_tmf_sata_srst_low) { | 633 | tmf->tmf_code == isci_tmf_sata_srst_low) { |
704 | scu_stp_raw_request_construct_task_context(&sci_req->stp.req, | 634 | scu_stp_raw_request_construct_task_context(sci_req); |
705 | sci_req->task_context_buffer); | ||
706 | } else { | 635 | } else { |
707 | dev_err(scic_to_dev(sci_req->owning_controller), | 636 | dev_err(scic_to_dev(sci_req->owning_controller), |
708 | "%s: Request 0x%p received un-handled SAT " | 637 | "%s: Request 0x%p received un-handled SAT " |
@@ -749,9 +678,9 @@ static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) | |||
749 | 678 | ||
750 | enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) | 679 | enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) |
751 | { | 680 | { |
752 | struct scic_sds_controller *scic = sci_req->owning_controller; | ||
753 | struct scu_task_context *task_context; | ||
754 | enum sci_base_request_states state; | 681 | enum sci_base_request_states state; |
682 | struct scu_task_context *tc = sci_req->tc; | ||
683 | struct scic_sds_controller *scic = sci_req->owning_controller; | ||
755 | 684 | ||
756 | state = sci_req->sm.current_state_id; | 685 | state = sci_req->sm.current_state_id; |
757 | if (state != SCI_REQ_CONSTRUCTED) { | 686 | if (state != SCI_REQ_CONSTRUCTED) { |
@@ -761,61 +690,39 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) | |||
761 | return SCI_FAILURE_INVALID_STATE; | 690 | return SCI_FAILURE_INVALID_STATE; |
762 | } | 691 | } |
763 | 692 | ||
764 | /* if necessary, allocate a TCi for the io request object and then will, | 693 | tc->task_index = ISCI_TAG_TCI(sci_req->io_tag); |
765 | * if necessary, copy the constructed TC data into the actual TC buffer. | ||
766 | * If everything is successful the post context field is updated with | ||
767 | * the TCi so the controller can post the request to the hardware. | ||
768 | */ | ||
769 | if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) | ||
770 | sci_req->io_tag = scic_controller_allocate_io_tag(scic); | ||
771 | |||
772 | /* Record the IO Tag in the request */ | ||
773 | if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) { | ||
774 | task_context = sci_req->task_context_buffer; | ||
775 | |||
776 | task_context->task_index = ISCI_TAG_TCI(sci_req->io_tag); | ||
777 | |||
778 | switch (task_context->protocol_type) { | ||
779 | case SCU_TASK_CONTEXT_PROTOCOL_SMP: | ||
780 | case SCU_TASK_CONTEXT_PROTOCOL_SSP: | ||
781 | /* SSP/SMP Frame */ | ||
782 | task_context->type.ssp.tag = sci_req->io_tag; | ||
783 | task_context->type.ssp.target_port_transfer_tag = | ||
784 | 0xFFFF; | ||
785 | break; | ||
786 | 694 | ||
787 | case SCU_TASK_CONTEXT_PROTOCOL_STP: | 695 | switch (tc->protocol_type) { |
788 | /* STP/SATA Frame | 696 | case SCU_TASK_CONTEXT_PROTOCOL_SMP: |
789 | * task_context->type.stp.ncq_tag = sci_req->ncq_tag; | 697 | case SCU_TASK_CONTEXT_PROTOCOL_SSP: |
790 | */ | 698 | /* SSP/SMP Frame */ |
791 | break; | 699 | tc->type.ssp.tag = sci_req->io_tag; |
792 | 700 | tc->type.ssp.target_port_transfer_tag = 0xFFFF; | |
793 | case SCU_TASK_CONTEXT_PROTOCOL_NONE: | 701 | break; |
794 | /* / @todo When do we set no protocol type? */ | ||
795 | break; | ||
796 | 702 | ||
797 | default: | 703 | case SCU_TASK_CONTEXT_PROTOCOL_STP: |
798 | /* This should never happen since we build the IO | 704 | /* STP/SATA Frame |
799 | * requests */ | 705 | * tc->type.stp.ncq_tag = sci_req->ncq_tag; |
800 | break; | 706 | */ |
801 | } | 707 | break; |
802 | 708 | ||
803 | /* | 709 | case SCU_TASK_CONTEXT_PROTOCOL_NONE: |
804 | * Check to see if we need to copy the task context buffer | 710 | /* / @todo When do we set no protocol type? */ |
805 | * or have been building into the task context buffer */ | 711 | break; |
806 | if (sci_req->was_tag_assigned_by_user == false) | ||
807 | scic_sds_controller_copy_task_context(scic, sci_req); | ||
808 | 712 | ||
809 | /* Add to the post_context the io tag value */ | 713 | default: |
810 | sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag); | 714 | /* This should never happen since we build the IO |
715 | * requests */ | ||
716 | break; | ||
717 | } | ||
811 | 718 | ||
812 | /* Everything is good go ahead and change state */ | 719 | /* Add to the post_context the io tag value */ |
813 | sci_change_state(&sci_req->sm, SCI_REQ_STARTED); | 720 | sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag); |
814 | 721 | ||
815 | return SCI_SUCCESS; | 722 | /* Everything is good go ahead and change state */ |
816 | } | 723 | sci_change_state(&sci_req->sm, SCI_REQ_STARTED); |
817 | 724 | ||
818 | return SCI_FAILURE_INSUFFICIENT_RESOURCES; | 725 | return SCI_SUCCESS; |
819 | } | 726 | } |
820 | 727 | ||
821 | enum sci_status | 728 | enum sci_status |
@@ -880,9 +787,6 @@ enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req) | |||
880 | "isci: request completion from wrong state (%d)\n", state)) | 787 | "isci: request completion from wrong state (%d)\n", state)) |
881 | return SCI_FAILURE_INVALID_STATE; | 788 | return SCI_FAILURE_INVALID_STATE; |
882 | 789 | ||
883 | if (!sci_req->was_tag_assigned_by_user) | ||
884 | scic_controller_free_io_tag(scic, sci_req->io_tag); | ||
885 | |||
886 | if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) | 790 | if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) |
887 | scic_sds_controller_release_frame(scic, | 791 | scic_sds_controller_release_frame(scic, |
888 | sci_req->saved_rx_frame_index); | 792 | sci_req->saved_rx_frame_index); |
@@ -1244,51 +1148,40 @@ void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req, | |||
1244 | * @note This could be made to return an error to the user if the user | 1148 | * @note This could be made to return an error to the user if the user |
1245 | * attempts to set the NCQ tag in the wrong state. | 1149 | * attempts to set the NCQ tag in the wrong state. |
1246 | */ | 1150 | */ |
1247 | req->task_context_buffer->type.stp.ncq_tag = ncq_tag; | 1151 | req->tc->type.stp.ncq_tag = ncq_tag; |
1248 | } | 1152 | } |
1249 | 1153 | ||
1250 | /** | 1154 | static struct scu_sgl_element *pio_sgl_next(struct scic_sds_stp_request *stp_req) |
1251 | * | ||
1252 | * @sci_req: | ||
1253 | * | ||
1254 | * Get the next SGL element from the request. - Check on which SGL element pair | ||
1255 | * we are working - if working on SLG pair element A - advance to element B - | ||
1256 | * else - check to see if there are more SGL element pairs for this IO request | ||
1257 | * - if there are more SGL element pairs - advance to the next pair and return | ||
1258 | * element A struct scu_sgl_element* | ||
1259 | */ | ||
1260 | static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req) | ||
1261 | { | 1155 | { |
1262 | struct scu_sgl_element *current_sgl; | 1156 | struct scu_sgl_element *sgl; |
1157 | struct scu_sgl_element_pair *sgl_pair; | ||
1263 | struct scic_sds_request *sci_req = to_sci_req(stp_req); | 1158 | struct scic_sds_request *sci_req = to_sci_req(stp_req); |
1264 | struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; | 1159 | struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; |
1265 | 1160 | ||
1266 | if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { | 1161 | sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->sgl_index); |
1267 | if (pio_sgl->sgl_pair->B.address_lower == 0 && | 1162 | if (!sgl_pair) |
1268 | pio_sgl->sgl_pair->B.address_upper == 0) { | 1163 | sgl = NULL; |
1269 | current_sgl = NULL; | 1164 | else if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { |
1165 | if (sgl_pair->B.address_lower == 0 && | ||
1166 | sgl_pair->B.address_upper == 0) { | ||
1167 | sgl = NULL; | ||
1270 | } else { | 1168 | } else { |
1271 | pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; | 1169 | pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; |
1272 | current_sgl = &pio_sgl->sgl_pair->B; | 1170 | sgl = &sgl_pair->B; |
1273 | } | 1171 | } |
1274 | } else { | 1172 | } else { |
1275 | if (pio_sgl->sgl_pair->next_pair_lower == 0 && | 1173 | if (sgl_pair->next_pair_lower == 0 && |
1276 | pio_sgl->sgl_pair->next_pair_upper == 0) { | 1174 | sgl_pair->next_pair_upper == 0) { |
1277 | current_sgl = NULL; | 1175 | sgl = NULL; |
1278 | } else { | 1176 | } else { |
1279 | u64 phys_addr; | 1177 | pio_sgl->sgl_index++; |
1280 | |||
1281 | phys_addr = pio_sgl->sgl_pair->next_pair_upper; | ||
1282 | phys_addr <<= 32; | ||
1283 | phys_addr |= pio_sgl->sgl_pair->next_pair_lower; | ||
1284 | |||
1285 | pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr); | ||
1286 | pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; | 1178 | pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; |
1287 | current_sgl = &pio_sgl->sgl_pair->A; | 1179 | sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->sgl_index); |
1180 | sgl = &sgl_pair->A; | ||
1288 | } | 1181 | } |
1289 | } | 1182 | } |
1290 | 1183 | ||
1291 | return current_sgl; | 1184 | return sgl; |
1292 | } | 1185 | } |
1293 | 1186 | ||
1294 | static enum sci_status | 1187 | static enum sci_status |
@@ -1328,21 +1221,19 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( | |||
1328 | struct scic_sds_request *sci_req, | 1221 | struct scic_sds_request *sci_req, |
1329 | u32 length) | 1222 | u32 length) |
1330 | { | 1223 | { |
1331 | struct scic_sds_controller *scic = sci_req->owning_controller; | ||
1332 | struct scic_sds_stp_request *stp_req = &sci_req->stp.req; | 1224 | struct scic_sds_stp_request *stp_req = &sci_req->stp.req; |
1333 | struct scu_task_context *task_context; | 1225 | struct scu_task_context *task_context = sci_req->tc; |
1226 | struct scu_sgl_element_pair *sgl_pair; | ||
1334 | struct scu_sgl_element *current_sgl; | 1227 | struct scu_sgl_element *current_sgl; |
1335 | 1228 | ||
1336 | /* Recycle the TC and reconstruct it for sending out DATA FIS containing | 1229 | /* Recycle the TC and reconstruct it for sending out DATA FIS containing |
1337 | * for the data from current_sgl+offset for the input length | 1230 | * for the data from current_sgl+offset for the input length |
1338 | */ | 1231 | */ |
1339 | task_context = scic_sds_controller_get_task_context_buffer(scic, | 1232 | sgl_pair = to_sgl_element_pair(sci_req, stp_req->type.pio.request_current.sgl_index); |
1340 | sci_req->io_tag); | ||
1341 | |||
1342 | if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) | 1233 | if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) |
1343 | current_sgl = &stp_req->type.pio.request_current.sgl_pair->A; | 1234 | current_sgl = &sgl_pair->A; |
1344 | else | 1235 | else |
1345 | current_sgl = &stp_req->type.pio.request_current.sgl_pair->B; | 1236 | current_sgl = &sgl_pair->B; |
1346 | 1237 | ||
1347 | /* update the TC */ | 1238 | /* update the TC */ |
1348 | task_context->command_iu_upper = current_sgl->address_upper; | 1239 | task_context->command_iu_upper = current_sgl->address_upper; |
@@ -1362,18 +1253,21 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc | |||
1362 | u32 remaining_bytes_in_current_sgl = 0; | 1253 | u32 remaining_bytes_in_current_sgl = 0; |
1363 | enum sci_status status = SCI_SUCCESS; | 1254 | enum sci_status status = SCI_SUCCESS; |
1364 | struct scic_sds_stp_request *stp_req = &sci_req->stp.req; | 1255 | struct scic_sds_stp_request *stp_req = &sci_req->stp.req; |
1256 | struct scu_sgl_element_pair *sgl_pair; | ||
1365 | 1257 | ||
1366 | sgl_offset = stp_req->type.pio.request_current.sgl_offset; | 1258 | sgl_offset = stp_req->type.pio.request_current.sgl_offset; |
1259 | sgl_pair = to_sgl_element_pair(sci_req, stp_req->type.pio.request_current.sgl_index); | ||
1260 | if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) | ||
1261 | return SCI_FAILURE; | ||
1367 | 1262 | ||
1368 | if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) { | 1263 | if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) { |
1369 | current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A); | 1264 | current_sgl = &sgl_pair->A; |
1370 | remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset; | 1265 | remaining_bytes_in_current_sgl = sgl_pair->A.length - sgl_offset; |
1371 | } else { | 1266 | } else { |
1372 | current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B); | 1267 | current_sgl = &sgl_pair->B; |
1373 | remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset; | 1268 | remaining_bytes_in_current_sgl = sgl_pair->B.length - sgl_offset; |
1374 | } | 1269 | } |
1375 | 1270 | ||
1376 | |||
1377 | if (stp_req->type.pio.pio_transfer_bytes > 0) { | 1271 | if (stp_req->type.pio.pio_transfer_bytes > 0) { |
1378 | if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) { | 1272 | if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) { |
1379 | /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */ | 1273 | /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */ |
@@ -1382,7 +1276,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc | |||
1382 | stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl; | 1276 | stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl; |
1383 | 1277 | ||
1384 | /* update the current sgl, sgl_offset and save for future */ | 1278 | /* update the current sgl, sgl_offset and save for future */ |
1385 | current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req); | 1279 | current_sgl = pio_sgl_next(stp_req); |
1386 | sgl_offset = 0; | 1280 | sgl_offset = 0; |
1387 | } | 1281 | } |
1388 | } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) { | 1282 | } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) { |
@@ -1945,7 +1839,7 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, | |||
1945 | return status; | 1839 | return status; |
1946 | } | 1840 | } |
1947 | 1841 | ||
1948 | if (stp_req->type.pio.request_current.sgl_pair == NULL) { | 1842 | if (stp_req->type.pio.request_current.sgl_index < 0) { |
1949 | sci_req->saved_rx_frame_index = frame_index; | 1843 | sci_req->saved_rx_frame_index = frame_index; |
1950 | stp_req->type.pio.pio_transfer_bytes = 0; | 1844 | stp_req->type.pio.pio_transfer_bytes = 0; |
1951 | } else { | 1845 | } else { |
@@ -2977,8 +2871,6 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2977 | * task to recognize the already completed case. | 2871 | * task to recognize the already completed case. |
2978 | */ | 2872 | */ |
2979 | request->terminated = true; | 2873 | request->terminated = true; |
2980 | |||
2981 | isci_host_can_dequeue(isci_host, 1); | ||
2982 | } | 2874 | } |
2983 | 2875 | ||
2984 | static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) | 2876 | static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) |
@@ -3039,7 +2931,7 @@ static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine | |||
3039 | struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); | 2931 | struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); |
3040 | 2932 | ||
3041 | /* Setting the abort bit in the Task Context is required by the silicon. */ | 2933 | /* Setting the abort bit in the Task Context is required by the silicon. */ |
3042 | sci_req->task_context_buffer->abort = 1; | 2934 | sci_req->tc->abort = 1; |
3043 | } | 2935 | } |
3044 | 2936 | ||
3045 | static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) | 2937 | static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) |
@@ -3069,7 +2961,7 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completio | |||
3069 | static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) | 2961 | static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) |
3070 | { | 2962 | { |
3071 | struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); | 2963 | struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); |
3072 | struct scu_task_context *task_context; | 2964 | struct scu_task_context *tc = sci_req->tc; |
3073 | struct host_to_dev_fis *h2d_fis; | 2965 | struct host_to_dev_fis *h2d_fis; |
3074 | enum sci_status status; | 2966 | enum sci_status status; |
3075 | 2967 | ||
@@ -3078,9 +2970,7 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_complet | |||
3078 | h2d_fis->control = 0; | 2970 | h2d_fis->control = 0; |
3079 | 2971 | ||
3080 | /* Clear the TC control bit */ | 2972 | /* Clear the TC control bit */ |
3081 | task_context = scic_sds_controller_get_task_context_buffer( | 2973 | tc->control_frame = 0; |
3082 | sci_req->owning_controller, sci_req->io_tag); | ||
3083 | task_context->control_frame = 0; | ||
3084 | 2974 | ||
3085 | status = scic_controller_continue_io(sci_req); | 2975 | status = scic_controller_continue_io(sci_req); |
3086 | WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); | 2976 | WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); |
@@ -3141,18 +3031,10 @@ scic_sds_general_request_construct(struct scic_sds_controller *scic, | |||
3141 | sci_req->sci_status = SCI_SUCCESS; | 3031 | sci_req->sci_status = SCI_SUCCESS; |
3142 | sci_req->scu_status = 0; | 3032 | sci_req->scu_status = 0; |
3143 | sci_req->post_context = 0xFFFFFFFF; | 3033 | sci_req->post_context = 0xFFFFFFFF; |
3034 | sci_req->tc = &scic->task_context_table[ISCI_TAG_TCI(io_tag)]; | ||
3144 | 3035 | ||
3145 | sci_req->is_task_management_request = false; | 3036 | sci_req->is_task_management_request = false; |
3146 | 3037 | WARN_ONCE(io_tag == SCI_CONTROLLER_INVALID_IO_TAG, "straggling invalid tag usage\n"); | |
3147 | if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { | ||
3148 | sci_req->was_tag_assigned_by_user = false; | ||
3149 | sci_req->task_context_buffer = &sci_req->tc; | ||
3150 | } else { | ||
3151 | sci_req->was_tag_assigned_by_user = true; | ||
3152 | |||
3153 | sci_req->task_context_buffer = | ||
3154 | scic_sds_controller_get_task_context_buffer(scic, io_tag); | ||
3155 | } | ||
3156 | } | 3038 | } |
3157 | 3039 | ||
3158 | static enum sci_status | 3040 | static enum sci_status |
@@ -3178,8 +3060,7 @@ scic_io_request_construct(struct scic_sds_controller *scic, | |||
3178 | else | 3060 | else |
3179 | return SCI_FAILURE_UNSUPPORTED_PROTOCOL; | 3061 | return SCI_FAILURE_UNSUPPORTED_PROTOCOL; |
3180 | 3062 | ||
3181 | memset(sci_req->task_context_buffer, 0, | 3063 | memset(sci_req->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); |
3182 | offsetof(struct scu_task_context, sgl_pair_ab)); | ||
3183 | 3064 | ||
3184 | return status; | 3065 | return status; |
3185 | } | 3066 | } |
@@ -3197,7 +3078,7 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, | |||
3197 | if (dev->dev_type == SAS_END_DEV || | 3078 | if (dev->dev_type == SAS_END_DEV || |
3198 | dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | 3079 | dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { |
3199 | sci_req->is_task_management_request = true; | 3080 | sci_req->is_task_management_request = true; |
3200 | memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); | 3081 | memset(sci_req->tc, 0, sizeof(struct scu_task_context)); |
3201 | } else | 3082 | } else |
3202 | status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; | 3083 | status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; |
3203 | 3084 | ||
@@ -3299,7 +3180,7 @@ scic_io_request_construct_smp(struct device *dev, | |||
3299 | 3180 | ||
3300 | /* byte swap the smp request. */ | 3181 | /* byte swap the smp request. */ |
3301 | 3182 | ||
3302 | task_context = scic_sds_request_get_task_context(sci_req); | 3183 | task_context = sci_req->tc; |
3303 | 3184 | ||
3304 | sci_dev = scic_sds_request_get_device(sci_req); | 3185 | sci_dev = scic_sds_request_get_device(sci_req); |
3305 | sci_port = scic_sds_request_get_port(sci_req); | 3186 | sci_port = scic_sds_request_get_port(sci_req); |
@@ -3354,33 +3235,12 @@ scic_io_request_construct_smp(struct device *dev, | |||
3354 | */ | 3235 | */ |
3355 | task_context->task_phase = 0; | 3236 | task_context->task_phase = 0; |
3356 | 3237 | ||
3357 | if (sci_req->was_tag_assigned_by_user) { | 3238 | sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
3358 | /* | 3239 | (scic_sds_controller_get_protocol_engine_group(scic) << |
3359 | * Build the task context now since we have already read | 3240 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
3360 | * the data | 3241 | (scic_sds_port_get_index(sci_port) << |
3361 | */ | 3242 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
3362 | sci_req->post_context = | 3243 | ISCI_TAG_TCI(sci_req->io_tag)); |
3363 | (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | ||
3364 | (scic_sds_controller_get_protocol_engine_group(scic) << | ||
3365 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | ||
3366 | (scic_sds_port_get_index(sci_port) << | ||
3367 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | ||
3368 | ISCI_TAG_TCI(sci_req->io_tag)); | ||
3369 | } else { | ||
3370 | /* | ||
3371 | * Build the task context now since we have already read | ||
3372 | * the data. | ||
3373 | * I/O tag index is not assigned because we have to wait | ||
3374 | * until we get a TCi. | ||
3375 | */ | ||
3376 | sci_req->post_context = | ||
3377 | (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | ||
3378 | (scic_sds_controller_get_protocol_engine_group(scic) << | ||
3379 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | ||
3380 | (scic_sds_port_get_index(sci_port) << | ||
3381 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); | ||
3382 | } | ||
3383 | |||
3384 | /* | 3244 | /* |
3385 | * Copy the physical address for the command buffer to the SCU Task | 3245 | * Copy the physical address for the command buffer to the SCU Task |
3386 | * Context command buffer should not contain command header. | 3246 | * Context command buffer should not contain command header. |
@@ -3431,10 +3291,10 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) | |||
3431 | * | 3291 | * |
3432 | * SCI_SUCCESS on successfull completion, or specific failure code. | 3292 | * SCI_SUCCESS on successfull completion, or specific failure code. |
3433 | */ | 3293 | */ |
3434 | static enum sci_status isci_io_request_build( | 3294 | static enum sci_status isci_io_request_build(struct isci_host *isci_host, |
3435 | struct isci_host *isci_host, | 3295 | struct isci_request *request, |
3436 | struct isci_request *request, | 3296 | struct isci_remote_device *isci_device, |
3437 | struct isci_remote_device *isci_device) | 3297 | u16 tag) |
3438 | { | 3298 | { |
3439 | enum sci_status status = SCI_SUCCESS; | 3299 | enum sci_status status = SCI_SUCCESS; |
3440 | struct sas_task *task = isci_request_access_task(request); | 3300 | struct sas_task *task = isci_request_access_task(request); |
@@ -3471,8 +3331,7 @@ static enum sci_status isci_io_request_build( | |||
3471 | * we will let the core allocate the IO tag. | 3331 | * we will let the core allocate the IO tag. |
3472 | */ | 3332 | */ |
3473 | status = scic_io_request_construct(&isci_host->sci, sci_device, | 3333 | status = scic_io_request_construct(&isci_host->sci, sci_device, |
3474 | SCI_CONTROLLER_INVALID_IO_TAG, | 3334 | tag, &request->sci); |
3475 | &request->sci); | ||
3476 | 3335 | ||
3477 | if (status != SCI_SUCCESS) { | 3336 | if (status != SCI_SUCCESS) { |
3478 | dev_warn(&isci_host->pdev->dev, | 3337 | dev_warn(&isci_host->pdev->dev, |
@@ -3564,7 +3423,7 @@ struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, | |||
3564 | } | 3423 | } |
3565 | 3424 | ||
3566 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, | 3425 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, |
3567 | struct sas_task *task, gfp_t gfp_flags) | 3426 | struct sas_task *task, u16 tag, gfp_t gfp_flags) |
3568 | { | 3427 | { |
3569 | enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; | 3428 | enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; |
3570 | struct isci_request *ireq; | 3429 | struct isci_request *ireq; |
@@ -3576,7 +3435,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3576 | if (!ireq) | 3435 | if (!ireq) |
3577 | goto out; | 3436 | goto out; |
3578 | 3437 | ||
3579 | status = isci_io_request_build(ihost, ireq, idev); | 3438 | status = isci_io_request_build(ihost, ireq, idev, tag); |
3580 | if (status != SCI_SUCCESS) { | 3439 | if (status != SCI_SUCCESS) { |
3581 | dev_warn(&ihost->pdev->dev, | 3440 | dev_warn(&ihost->pdev->dev, |
3582 | "%s: request_construct failed - status = 0x%x\n", | 3441 | "%s: request_construct failed - status = 0x%x\n", |
@@ -3599,18 +3458,16 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3599 | */ | 3458 | */ |
3600 | status = scic_controller_start_task(&ihost->sci, | 3459 | status = scic_controller_start_task(&ihost->sci, |
3601 | &idev->sci, | 3460 | &idev->sci, |
3602 | &ireq->sci, | 3461 | &ireq->sci); |
3603 | SCI_CONTROLLER_INVALID_IO_TAG); | ||
3604 | } else { | 3462 | } else { |
3605 | status = SCI_FAILURE; | 3463 | status = SCI_FAILURE; |
3606 | } | 3464 | } |
3607 | } else { | 3465 | } else { |
3608 | |||
3609 | /* send the request, let the core assign the IO TAG. */ | 3466 | /* send the request, let the core assign the IO TAG. */ |
3610 | status = scic_controller_start_io(&ihost->sci, &idev->sci, | 3467 | status = scic_controller_start_io(&ihost->sci, &idev->sci, |
3611 | &ireq->sci, | 3468 | &ireq->sci); |
3612 | SCI_CONTROLLER_INVALID_IO_TAG); | ||
3613 | } | 3469 | } |
3470 | |||
3614 | if (status != SCI_SUCCESS && | 3471 | if (status != SCI_SUCCESS && |
3615 | status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { | 3472 | status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { |
3616 | dev_warn(&ihost->pdev->dev, | 3473 | dev_warn(&ihost->pdev->dev, |
@@ -3647,23 +3504,23 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3647 | if (status == | 3504 | if (status == |
3648 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { | 3505 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { |
3649 | /* Signal libsas that we need the SCSI error | 3506 | /* Signal libsas that we need the SCSI error |
3650 | * handler thread to work on this I/O and that | 3507 | * handler thread to work on this I/O and that |
3651 | * we want a device reset. | 3508 | * we want a device reset. |
3652 | */ | 3509 | */ |
3653 | spin_lock_irqsave(&task->task_state_lock, flags); | 3510 | spin_lock_irqsave(&task->task_state_lock, flags); |
3654 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | 3511 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; |
3655 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 3512 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
3656 | 3513 | ||
3657 | /* Cause this task to be scheduled in the SCSI error | 3514 | /* Cause this task to be scheduled in the SCSI error |
3658 | * handler thread. | 3515 | * handler thread. |
3659 | */ | 3516 | */ |
3660 | isci_execpath_callback(ihost, task, | 3517 | isci_execpath_callback(ihost, task, |
3661 | sas_task_abort); | 3518 | sas_task_abort); |
3662 | 3519 | ||
3663 | /* Change the status, since we are holding | 3520 | /* Change the status, since we are holding |
3664 | * the I/O until it is managed by the SCSI | 3521 | * the I/O until it is managed by the SCSI |
3665 | * error handler. | 3522 | * error handler. |
3666 | */ | 3523 | */ |
3667 | status = SCI_SUCCESS; | 3524 | status = SCI_SUCCESS; |
3668 | } | 3525 | } |
3669 | 3526 | ||