Diffstat (limited to 'drivers/scsi/isci/request.c')
-rw-r--r--   drivers/scsi/isci/request.c   360
1 file changed, 180 insertions, 180 deletions
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 36e674896bc5..bcb3c08c19a7 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -89,7 +89,7 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
89 return ihost->task_context_dma + offset; 89 return ihost->task_context_dma + offset;
90 } 90 }
91 91
92 return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); 92 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
93} 93}
94 94
95static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) 95static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
@@ -100,7 +100,7 @@ static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
100 e->address_modifier = 0; 100 e->address_modifier = 0;
101} 101}
102 102
103static void scic_sds_request_build_sgl(struct isci_request *ireq) 103static void sci_request_build_sgl(struct isci_request *ireq)
104{ 104{
105 struct isci_host *ihost = ireq->isci_host; 105 struct isci_host *ihost = ireq->isci_host;
106 struct sas_task *task = isci_request_access_task(ireq); 106 struct sas_task *task = isci_request_access_task(ireq);
@@ -158,7 +158,7 @@ static void scic_sds_request_build_sgl(struct isci_request *ireq)
158 } 158 }
159} 159}
160 160
161static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq) 161static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
162{ 162{
163 struct ssp_cmd_iu *cmd_iu; 163 struct ssp_cmd_iu *cmd_iu;
164 struct sas_task *task = isci_request_access_task(ireq); 164 struct sas_task *task = isci_request_access_task(ireq);
@@ -178,7 +178,7 @@ static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq)
178 sizeof(task->ssp_task.cdb) / sizeof(u32)); 178 sizeof(task->ssp_task.cdb) / sizeof(u32));
179} 179}
180 180
181static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq) 181static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
182{ 182{
183 struct ssp_task_iu *task_iu; 183 struct ssp_task_iu *task_iu;
184 struct sas_task *task = isci_request_access_task(ireq); 184 struct sas_task *task = isci_request_access_task(ireq);
@@ -211,8 +211,8 @@ static void scu_ssp_reqeust_construct_task_context(
211 struct isci_remote_device *idev; 211 struct isci_remote_device *idev;
212 struct isci_port *iport; 212 struct isci_port *iport;
213 213
214 idev = scic_sds_request_get_device(ireq); 214 idev = sci_request_get_device(ireq);
215 iport = scic_sds_request_get_port(ireq); 215 iport = sci_request_get_port(ireq);
216 216
217 /* Fill in the TC with the its required data */ 217 /* Fill in the TC with the its required data */
218 task_context->abort = 0; 218 task_context->abort = 0;
@@ -220,13 +220,13 @@ static void scu_ssp_reqeust_construct_task_context(
220 task_context->initiator_request = 1; 220 task_context->initiator_request = 1;
221 task_context->connection_rate = idev->connection_rate; 221 task_context->connection_rate = idev->connection_rate;
222 task_context->protocol_engine_index = 222 task_context->protocol_engine_index =
223 scic_sds_controller_get_protocol_engine_group(controller); 223 sci_controller_get_protocol_engine_group(controller);
224 task_context->logical_port_index = scic_sds_port_get_index(iport); 224 task_context->logical_port_index = sci_port_get_index(iport);
225 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; 225 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
226 task_context->valid = SCU_TASK_CONTEXT_VALID; 226 task_context->valid = SCU_TASK_CONTEXT_VALID;
227 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 227 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
228 228
229 task_context->remote_node_index = scic_sds_remote_device_get_index(idev); 229 task_context->remote_node_index = sci_remote_device_get_index(idev);
230 task_context->command_code = 0; 230 task_context->command_code = 0;
231 231
232 task_context->link_layer_control = 0; 232 task_context->link_layer_control = 0;
@@ -242,9 +242,9 @@ static void scu_ssp_reqeust_construct_task_context(
242 task_context->task_phase = 0x01; 242 task_context->task_phase = 0x01;
243 243
244 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 244 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
245 (scic_sds_controller_get_protocol_engine_group(controller) << 245 (sci_controller_get_protocol_engine_group(controller) <<
246 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 246 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
247 (scic_sds_port_get_index(iport) << 247 (sci_port_get_index(iport) <<
248 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 248 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
249 ISCI_TAG_TCI(ireq->io_tag)); 249 ISCI_TAG_TCI(ireq->io_tag));
250 250
@@ -252,7 +252,7 @@ static void scu_ssp_reqeust_construct_task_context(
252 * Copy the physical address for the command buffer to the 252 * Copy the physical address for the command buffer to the
253 * SCU Task Context 253 * SCU Task Context
254 */ 254 */
255 dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); 255 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
256 256
257 task_context->command_iu_upper = upper_32_bits(dma_addr); 257 task_context->command_iu_upper = upper_32_bits(dma_addr);
258 task_context->command_iu_lower = lower_32_bits(dma_addr); 258 task_context->command_iu_lower = lower_32_bits(dma_addr);
@@ -261,7 +261,7 @@ static void scu_ssp_reqeust_construct_task_context(
261 * Copy the physical address for the response buffer to the 261 * Copy the physical address for the response buffer to the
262 * SCU Task Context 262 * SCU Task Context
263 */ 263 */
264 dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); 264 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
265 265
266 task_context->response_iu_upper = upper_32_bits(dma_addr); 266 task_context->response_iu_upper = upper_32_bits(dma_addr);
267 task_context->response_iu_lower = lower_32_bits(dma_addr); 267 task_context->response_iu_lower = lower_32_bits(dma_addr);
@@ -298,7 +298,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
298 task_context->transfer_length_bytes = len; 298 task_context->transfer_length_bytes = len;
299 299
300 if (task_context->transfer_length_bytes > 0) 300 if (task_context->transfer_length_bytes > 0)
301 scic_sds_request_build_sgl(ireq); 301 sci_request_build_sgl(ireq);
302} 302}
303 303
304/** 304/**
@@ -349,8 +349,8 @@ static void scu_sata_reqeust_construct_task_context(
349 struct isci_remote_device *idev; 349 struct isci_remote_device *idev;
350 struct isci_port *iport; 350 struct isci_port *iport;
351 351
352 idev = scic_sds_request_get_device(ireq); 352 idev = sci_request_get_device(ireq);
353 iport = scic_sds_request_get_port(ireq); 353 iport = sci_request_get_port(ireq);
354 354
355 /* Fill in the TC with the its required data */ 355 /* Fill in the TC with the its required data */
356 task_context->abort = 0; 356 task_context->abort = 0;
@@ -358,14 +358,14 @@ static void scu_sata_reqeust_construct_task_context(
358 task_context->initiator_request = 1; 358 task_context->initiator_request = 1;
359 task_context->connection_rate = idev->connection_rate; 359 task_context->connection_rate = idev->connection_rate;
360 task_context->protocol_engine_index = 360 task_context->protocol_engine_index =
361 scic_sds_controller_get_protocol_engine_group(controller); 361 sci_controller_get_protocol_engine_group(controller);
362 task_context->logical_port_index = 362 task_context->logical_port_index =
363 scic_sds_port_get_index(iport); 363 sci_port_get_index(iport);
364 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; 364 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
365 task_context->valid = SCU_TASK_CONTEXT_VALID; 365 task_context->valid = SCU_TASK_CONTEXT_VALID;
366 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 366 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
367 367
368 task_context->remote_node_index = scic_sds_remote_device_get_index(idev); 368 task_context->remote_node_index = sci_remote_device_get_index(idev);
369 task_context->command_code = 0; 369 task_context->command_code = 0;
370 370
371 task_context->link_layer_control = 0; 371 task_context->link_layer_control = 0;
@@ -385,9 +385,9 @@ static void scu_sata_reqeust_construct_task_context(
385 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; 385 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
386 386
387 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 387 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
388 (scic_sds_controller_get_protocol_engine_group(controller) << 388 (sci_controller_get_protocol_engine_group(controller) <<
389 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 389 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
390 (scic_sds_port_get_index(iport) << 390 (sci_port_get_index(iport) <<
391 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 391 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
392 ISCI_TAG_TCI(ireq->io_tag)); 392 ISCI_TAG_TCI(ireq->io_tag));
393 /* 393 /*
@@ -395,7 +395,7 @@ static void scu_sata_reqeust_construct_task_context(
395 * Context. We must offset the command buffer by 4 bytes because the 395 * Context. We must offset the command buffer by 4 bytes because the
396 * first 4 bytes are transfered in the body of the TC. 396 * first 4 bytes are transfered in the body of the TC.
397 */ 397 */
398 dma_addr = scic_io_request_get_dma_addr(ireq, 398 dma_addr = sci_io_request_get_dma_addr(ireq,
399 ((char *) &ireq->stp.cmd) + 399 ((char *) &ireq->stp.cmd) +
400 sizeof(u32)); 400 sizeof(u32));
401 401
@@ -420,7 +420,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq
420 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); 420 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
421} 421}
422 422
423static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq, 423static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
424 bool copy_rx_frame) 424 bool copy_rx_frame)
425{ 425{
426 struct isci_stp_request *stp_req = &ireq->stp.req; 426 struct isci_stp_request *stp_req = &ireq->stp.req;
@@ -432,7 +432,7 @@ static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *i
432 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; 432 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
433 433
434 if (copy_rx_frame) { 434 if (copy_rx_frame) {
435 scic_sds_request_build_sgl(ireq); 435 sci_request_build_sgl(ireq);
436 stp_req->sgl.index = 0; 436 stp_req->sgl.index = 0;
437 } else { 437 } else {
438 /* The user does not want the data copied to the SGL buffer location */ 438 /* The user does not want the data copied to the SGL buffer location */
@@ -454,7 +454,7 @@ static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *i
454 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method 454 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
455 * returns an indication as to whether the construction was successful. 455 * returns an indication as to whether the construction was successful.
456 */ 456 */
457static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, 457static void sci_stp_optimized_request_construct(struct isci_request *ireq,
458 u8 optimized_task_type, 458 u8 optimized_task_type,
459 u32 len, 459 u32 len,
460 enum dma_data_direction dir) 460 enum dma_data_direction dir)
@@ -465,7 +465,7 @@ static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq,
465 scu_sata_reqeust_construct_task_context(ireq, task_context); 465 scu_sata_reqeust_construct_task_context(ireq, task_context);
466 466
467 /* Copy over the SGL elements */ 467 /* Copy over the SGL elements */
468 scic_sds_request_build_sgl(ireq); 468 sci_request_build_sgl(ireq);
469 469
470 /* Copy over the number of bytes to be transfered */ 470 /* Copy over the number of bytes to be transfered */
471 task_context->transfer_length_bytes = len; 471 task_context->transfer_length_bytes = len;
@@ -490,7 +490,7 @@ static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq,
490 490
491 491
492static enum sci_status 492static enum sci_status
493scic_io_request_construct_sata(struct isci_request *ireq, 493sci_io_request_construct_sata(struct isci_request *ireq,
494 u32 len, 494 u32 len,
495 enum dma_data_direction dir, 495 enum dma_data_direction dir,
496 bool copy) 496 bool copy)
@@ -533,7 +533,7 @@ scic_io_request_construct_sata(struct isci_request *ireq,
533 533
534 /* NCQ */ 534 /* NCQ */
535 if (task->ata_task.use_ncq) { 535 if (task->ata_task.use_ncq) {
536 scic_sds_stp_optimized_request_construct(ireq, 536 sci_stp_optimized_request_construct(ireq,
537 SCU_TASK_TYPE_FPDMAQ_READ, 537 SCU_TASK_TYPE_FPDMAQ_READ,
538 len, dir); 538 len, dir);
539 return SCI_SUCCESS; 539 return SCI_SUCCESS;
@@ -541,17 +541,17 @@ scic_io_request_construct_sata(struct isci_request *ireq,
541 541
542 /* DMA */ 542 /* DMA */
543 if (task->ata_task.dma_xfer) { 543 if (task->ata_task.dma_xfer) {
544 scic_sds_stp_optimized_request_construct(ireq, 544 sci_stp_optimized_request_construct(ireq,
545 SCU_TASK_TYPE_DMA_IN, 545 SCU_TASK_TYPE_DMA_IN,
546 len, dir); 546 len, dir);
547 return SCI_SUCCESS; 547 return SCI_SUCCESS;
548 } else /* PIO */ 548 } else /* PIO */
549 return scic_sds_stp_pio_request_construct(ireq, copy); 549 return sci_stp_pio_request_construct(ireq, copy);
550 550
551 return status; 551 return status;
552} 552}
553 553
554static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq) 554static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
555{ 555{
556 struct sas_task *task = isci_request_access_task(ireq); 556 struct sas_task *task = isci_request_access_task(ireq);
557 557
@@ -561,28 +561,28 @@ static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *
561 task->data_dir, 561 task->data_dir,
562 task->total_xfer_len); 562 task->total_xfer_len);
563 563
564 scic_sds_io_request_build_ssp_command_iu(ireq); 564 sci_io_request_build_ssp_command_iu(ireq);
565 565
566 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 566 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
567 567
568 return SCI_SUCCESS; 568 return SCI_SUCCESS;
569} 569}
570 570
571enum sci_status scic_task_request_construct_ssp( 571enum sci_status sci_task_request_construct_ssp(
572 struct isci_request *ireq) 572 struct isci_request *ireq)
573{ 573{
574 /* Construct the SSP Task SCU Task Context */ 574 /* Construct the SSP Task SCU Task Context */
575 scu_ssp_task_request_construct_task_context(ireq); 575 scu_ssp_task_request_construct_task_context(ireq);
576 576
577 /* Fill in the SSP Task IU */ 577 /* Fill in the SSP Task IU */
578 scic_sds_task_request_build_ssp_task_iu(ireq); 578 sci_task_request_build_ssp_task_iu(ireq);
579 579
580 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 580 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
581 581
582 return SCI_SUCCESS; 582 return SCI_SUCCESS;
583} 583}
584 584
585static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq) 585static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
586{ 586{
587 enum sci_status status; 587 enum sci_status status;
588 bool copy = false; 588 bool copy = false;
@@ -592,7 +592,7 @@ static enum sci_status scic_io_request_construct_basic_sata(struct isci_request
592 592
593 copy = (task->data_dir == DMA_NONE) ? false : true; 593 copy = (task->data_dir == DMA_NONE) ? false : true;
594 594
595 status = scic_io_request_construct_sata(ireq, 595 status = sci_io_request_construct_sata(ireq,
596 task->total_xfer_len, 596 task->total_xfer_len,
597 task->data_dir, 597 task->data_dir,
598 copy); 598 copy);
@@ -603,7 +603,7 @@ static enum sci_status scic_io_request_construct_basic_sata(struct isci_request
603 return status; 603 return status;
604} 604}
605 605
606enum sci_status scic_task_request_construct_sata(struct isci_request *ireq) 606enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
607{ 607{
608 enum sci_status status = SCI_SUCCESS; 608 enum sci_status status = SCI_SUCCESS;
609 609
@@ -648,7 +648,7 @@ static u32 sci_req_tx_bytes(struct isci_request *ireq)
648 * BAR1 is the scu_registers 648 * BAR1 is the scu_registers
649 * 0x20002C = 0x200000 + 0x2c 649 * 0x20002C = 0x200000 + 0x2c
650 * = start of task context SRAM + offset of (type.ssp.data_offset) 650 * = start of task context SRAM + offset of (type.ssp.data_offset)
651 * TCi is the io_tag of struct scic_sds_request 651 * TCi is the io_tag of struct sci_request
652 */ 652 */
653 ret_val = readl(scu_reg_base + 653 ret_val = readl(scu_reg_base +
654 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + 654 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
@@ -658,7 +658,7 @@ static u32 sci_req_tx_bytes(struct isci_request *ireq)
658 return ret_val; 658 return ret_val;
659} 659}
660 660
661enum sci_status scic_sds_request_start(struct isci_request *ireq) 661enum sci_status sci_request_start(struct isci_request *ireq)
662{ 662{
663 enum sci_base_request_states state; 663 enum sci_base_request_states state;
664 struct scu_task_context *tc = ireq->tc; 664 struct scu_task_context *tc = ireq->tc;
@@ -708,7 +708,7 @@ enum sci_status scic_sds_request_start(struct isci_request *ireq)
708} 708}
709 709
710enum sci_status 710enum sci_status
711scic_sds_io_request_terminate(struct isci_request *ireq) 711sci_io_request_terminate(struct isci_request *ireq)
712{ 712{
713 enum sci_base_request_states state; 713 enum sci_base_request_states state;
714 714
@@ -716,7 +716,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq)
716 716
717 switch (state) { 717 switch (state) {
718 case SCI_REQ_CONSTRUCTED: 718 case SCI_REQ_CONSTRUCTED:
719 scic_sds_request_set_status(ireq, 719 sci_request_set_status(ireq,
720 SCU_TASK_DONE_TASK_ABORT, 720 SCU_TASK_DONE_TASK_ABORT,
721 SCI_FAILURE_IO_TERMINATED); 721 SCI_FAILURE_IO_TERMINATED);
722 722
@@ -759,7 +759,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq)
759 return SCI_FAILURE_INVALID_STATE; 759 return SCI_FAILURE_INVALID_STATE;
760} 760}
761 761
762enum sci_status scic_sds_request_complete(struct isci_request *ireq) 762enum sci_status sci_request_complete(struct isci_request *ireq)
763{ 763{
764 enum sci_base_request_states state; 764 enum sci_base_request_states state;
765 struct isci_host *ihost = ireq->owning_controller; 765 struct isci_host *ihost = ireq->owning_controller;
@@ -770,7 +770,7 @@ enum sci_status scic_sds_request_complete(struct isci_request *ireq)
770 return SCI_FAILURE_INVALID_STATE; 770 return SCI_FAILURE_INVALID_STATE;
771 771
772 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) 772 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
773 scic_sds_controller_release_frame(ihost, 773 sci_controller_release_frame(ihost,
774 ireq->saved_rx_frame_index); 774 ireq->saved_rx_frame_index);
775 775
776 /* XXX can we just stop the machine and remove the 'final' state? */ 776 /* XXX can we just stop the machine and remove the 'final' state? */
@@ -778,7 +778,7 @@ enum sci_status scic_sds_request_complete(struct isci_request *ireq)
778 return SCI_SUCCESS; 778 return SCI_SUCCESS;
779} 779}
780 780
781enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, 781enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
782 u32 event_code) 782 u32 event_code)
783{ 783{
784 enum sci_base_request_states state; 784 enum sci_base_request_states state;
@@ -818,7 +818,7 @@ enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq,
818 * @sci_req: This parameter specifies the request object for which to copy 818 * @sci_req: This parameter specifies the request object for which to copy
819 * the response data. 819 * the response data.
820 */ 820 */
821static void scic_sds_io_request_copy_response(struct isci_request *ireq) 821static void sci_io_request_copy_response(struct isci_request *ireq)
822{ 822{
823 void *resp_buf; 823 void *resp_buf;
824 u32 len; 824 u32 len;
@@ -848,7 +848,7 @@ request_started_state_tc_event(struct isci_request *ireq,
848 */ 848 */
849 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 849 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
850 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 850 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
851 scic_sds_request_set_status(ireq, 851 sci_request_set_status(ireq,
852 SCU_TASK_DONE_GOOD, 852 SCU_TASK_DONE_GOOD,
853 SCI_SUCCESS); 853 SCI_SUCCESS);
854 break; 854 break;
@@ -868,11 +868,11 @@ request_started_state_tc_event(struct isci_request *ireq,
868 word_cnt); 868 word_cnt);
869 869
870 if (resp->status == 0) { 870 if (resp->status == 0) {
871 scic_sds_request_set_status(ireq, 871 sci_request_set_status(ireq,
872 SCU_TASK_DONE_GOOD, 872 SCU_TASK_DONE_GOOD,
873 SCI_SUCCESS_IO_DONE_EARLY); 873 SCI_SUCCESS_IO_DONE_EARLY);
874 } else { 874 } else {
875 scic_sds_request_set_status(ireq, 875 sci_request_set_status(ireq,
876 SCU_TASK_DONE_CHECK_RESPONSE, 876 SCU_TASK_DONE_CHECK_RESPONSE,
877 SCI_FAILURE_IO_RESPONSE_VALID); 877 SCI_FAILURE_IO_RESPONSE_VALID);
878 } 878 }
@@ -885,7 +885,7 @@ request_started_state_tc_event(struct isci_request *ireq,
885 &ireq->ssp.rsp, 885 &ireq->ssp.rsp,
886 word_cnt); 886 word_cnt);
887 887
888 scic_sds_request_set_status(ireq, 888 sci_request_set_status(ireq,
889 SCU_TASK_DONE_CHECK_RESPONSE, 889 SCU_TASK_DONE_CHECK_RESPONSE,
890 SCI_FAILURE_IO_RESPONSE_VALID); 890 SCI_FAILURE_IO_RESPONSE_VALID);
891 break; 891 break;
@@ -900,11 +900,11 @@ request_started_state_tc_event(struct isci_request *ireq,
900 datapres = resp_iu->datapres; 900 datapres = resp_iu->datapres;
901 901
902 if (datapres == 1 || datapres == 2) { 902 if (datapres == 1 || datapres == 2) {
903 scic_sds_request_set_status(ireq, 903 sci_request_set_status(ireq,
904 SCU_TASK_DONE_CHECK_RESPONSE, 904 SCU_TASK_DONE_CHECK_RESPONSE,
905 SCI_FAILURE_IO_RESPONSE_VALID); 905 SCI_FAILURE_IO_RESPONSE_VALID);
906 } else 906 } else
907 scic_sds_request_set_status(ireq, 907 sci_request_set_status(ireq,
908 SCU_TASK_DONE_GOOD, 908 SCU_TASK_DONE_GOOD,
909 SCI_SUCCESS); 909 SCI_SUCCESS);
910 break; 910 break;
@@ -921,12 +921,12 @@ request_started_state_tc_event(struct isci_request *ireq,
921 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 921 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
922 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): 922 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
923 if (ireq->protocol == SCIC_STP_PROTOCOL) { 923 if (ireq->protocol == SCIC_STP_PROTOCOL) {
924 scic_sds_request_set_status(ireq, 924 sci_request_set_status(ireq,
925 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 925 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
926 SCU_COMPLETION_TL_STATUS_SHIFT, 926 SCU_COMPLETION_TL_STATUS_SHIFT,
927 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); 927 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
928 } else { 928 } else {
929 scic_sds_request_set_status(ireq, 929 sci_request_set_status(ireq,
930 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 930 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
931 SCU_COMPLETION_TL_STATUS_SHIFT, 931 SCU_COMPLETION_TL_STATUS_SHIFT,
932 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 932 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
@@ -944,7 +944,7 @@ request_started_state_tc_event(struct isci_request *ireq,
944 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): 944 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
945 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): 945 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
946 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): 946 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
947 scic_sds_request_set_status(ireq, 947 sci_request_set_status(ireq,
948 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 948 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
949 SCU_COMPLETION_TL_STATUS_SHIFT, 949 SCU_COMPLETION_TL_STATUS_SHIFT,
950 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); 950 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
@@ -967,7 +967,7 @@ request_started_state_tc_event(struct isci_request *ireq,
967 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): 967 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
968 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): 968 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
969 default: 969 default:
970 scic_sds_request_set_status( 970 sci_request_set_status(
971 ireq, 971 ireq,
972 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 972 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
973 SCU_COMPLETION_TL_STATUS_SHIFT, 973 SCU_COMPLETION_TL_STATUS_SHIFT,
@@ -991,7 +991,7 @@ request_aborting_state_tc_event(struct isci_request *ireq,
991 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 991 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
992 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 992 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
993 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): 993 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
994 scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, 994 sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT,
995 SCI_FAILURE_IO_TERMINATED); 995 SCI_FAILURE_IO_TERMINATED);
996 996
997 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 997 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
@@ -1012,7 +1012,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq
1012{ 1012{
1013 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1013 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1014 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1014 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1015 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, 1015 sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1016 SCI_SUCCESS); 1016 SCI_SUCCESS);
1017 1017
1018 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1018 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
@@ -1036,7 +1036,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq
1036 * If a NAK was received, then it is up to the user to retry 1036 * If a NAK was received, then it is up to the user to retry
1037 * the request. 1037 * the request.
1038 */ 1038 */
1039 scic_sds_request_set_status(ireq, 1039 sci_request_set_status(ireq,
1040 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1040 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1041 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1041 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1042 1042
@@ -1057,7 +1057,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq,
1057 * unexpected. but if the TC has success status, we 1057 * unexpected. but if the TC has success status, we
1058 * complete the IO anyway. 1058 * complete the IO anyway.
1059 */ 1059 */
1060 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, 1060 sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1061 SCI_SUCCESS); 1061 SCI_SUCCESS);
1062 1062
1063 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1063 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
@@ -1074,7 +1074,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq,
1074 * these SMP_XXX_XX_ERR status. For these type of error, 1074 * these SMP_XXX_XX_ERR status. For these type of error,
1075 * we ask ihost user to retry the request. 1075 * we ask ihost user to retry the request.
1076 */ 1076 */
1077 scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, 1077 sci_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR,
1078 SCI_FAILURE_RETRY_REQUIRED); 1078 SCI_FAILURE_RETRY_REQUIRED);
1079 1079
1080 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1080 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
@@ -1084,7 +1084,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq,
1084 /* All other completion status cause the IO to be complete. If a NAK 1084 /* All other completion status cause the IO to be complete. If a NAK
1085 * was received, then it is up to the user to retry the request 1085 * was received, then it is up to the user to retry the request
1086 */ 1086 */
1087 scic_sds_request_set_status(ireq, 1087 sci_request_set_status(ireq,
1088 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1088 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1089 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1089 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1090 1090
@@ -1101,7 +1101,7 @@ smp_request_await_tc_event(struct isci_request *ireq,
1101{ 1101{
1102 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1102 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1103 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1103 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1104 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, 1104 sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1105 SCI_SUCCESS); 1105 SCI_SUCCESS);
1106 1106
1107 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1107 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
@@ -1111,7 +1111,7 @@ smp_request_await_tc_event(struct isci_request *ireq,
1111 * complete. If a NAK was received, then it is up to 1111 * complete. If a NAK was received, then it is up to
1112 * the user to retry the request. 1112 * the user to retry the request.
1113 */ 1113 */
1114 scic_sds_request_set_status(ireq, 1114 sci_request_set_status(ireq,
1115 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1115 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1116 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1116 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1117 1117
@@ -1122,7 +1122,7 @@ smp_request_await_tc_event(struct isci_request *ireq,
1122 return SCI_SUCCESS; 1122 return SCI_SUCCESS;
1123} 1123}
1124 1124
1125void scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, 1125void sci_stp_io_request_set_ncq_tag(struct isci_request *ireq,
1126 u16 ncq_tag) 1126 u16 ncq_tag)
1127{ 1127{
1128 /** 1128 /**
@@ -1171,7 +1171,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
1171{ 1171{
1172 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1172 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1173 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1173 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1174 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, 1174 sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1175 SCI_SUCCESS); 1175 SCI_SUCCESS);
1176 1176
1177 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); 1177 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
@@ -1182,7 +1182,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
1182 * complete. If a NAK was received, then it is up to 1182 * complete. If a NAK was received, then it is up to
1183 * the user to retry the request. 1183 * the user to retry the request.
1184 */ 1184 */
1185 scic_sds_request_set_status(ireq, 1185 sci_request_set_status(ireq,
1186 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1186 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1187 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1187 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1188 1188
@@ -1198,7 +1198,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
1198/* transmit DATA_FIS from (current sgl + offset) for input 1198/* transmit DATA_FIS from (current sgl + offset) for input
1199 * parameter length. current sgl and offset is alreay stored in the IO request 1199 * parameter length. current sgl and offset is alreay stored in the IO request
1200 */ 1200 */
1201static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( 1201static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
1202 struct isci_request *ireq, 1202 struct isci_request *ireq,
1203 u32 length) 1203 u32 length)
1204{ 1204{
@@ -1223,10 +1223,10 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
1223 task_context->type.stp.fis_type = FIS_DATA; 1223 task_context->type.stp.fis_type = FIS_DATA;
1224 1224
1225 /* send the new TC out. */ 1225 /* send the new TC out. */
1226 return scic_controller_continue_io(ireq); 1226 return sci_controller_continue_io(ireq);
1227} 1227}
1228 1228
1229static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) 1229static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
1230{ 1230{
1231 struct isci_stp_request *stp_req = &ireq->stp.req; 1231 struct isci_stp_request *stp_req = &ireq->stp.req;
1232 struct scu_sgl_element_pair *sgl_pair; 1232 struct scu_sgl_element_pair *sgl_pair;
@@ -1252,7 +1252,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is
1252 return SCI_SUCCESS; 1252 return SCI_SUCCESS;
1253 1253
1254 if (stp_req->pio_len >= len) { 1254 if (stp_req->pio_len >= len) {
1255 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, len); 1255 status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
1256 if (status != SCI_SUCCESS) 1256 if (status != SCI_SUCCESS)
1257 return status; 1257 return status;
1258 stp_req->pio_len -= len; 1258 stp_req->pio_len -= len;
@@ -1261,7 +1261,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is
1261 sgl = pio_sgl_next(stp_req); 1261 sgl = pio_sgl_next(stp_req);
1262 offset = 0; 1262 offset = 0;
1263 } else if (stp_req->pio_len < len) { 1263 } else if (stp_req->pio_len < len) {
1264 scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); 1264 sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
1265 1265
1266 /* Sgl offset will be adjusted and saved for future */ 1266 /* Sgl offset will be adjusted and saved for future */
1267 offset += stp_req->pio_len; 1267 offset += stp_req->pio_len;
@@ -1284,7 +1284,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is
1284 * specified data region. enum sci_status 1284 * specified data region. enum sci_status
1285 */ 1285 */
1286static enum sci_status 1286static enum sci_status
1287scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, 1287sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
1288 u8 *data_buf, u32 len) 1288 u8 *data_buf, u32 len)
1289{ 1289{
1290 struct isci_request *ireq; 1290 struct isci_request *ireq;
@@ -1328,7 +1328,7 @@ scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_r
1328 * 1328 *
1329 * Copy the data buffer to the io request data region. enum sci_status 1329 * Copy the data buffer to the io request data region. enum sci_status
1330 */ 1330 */
1331static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( 1331static enum sci_status sci_stp_request_pio_data_in_copy_data(
1332 struct isci_stp_request *stp_req, 1332 struct isci_stp_request *stp_req,
1333 u8 *data_buffer) 1333 u8 *data_buffer)
1334{ 1334{
@@ -1338,14 +1338,14 @@ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1338 * If there is less than 1K remaining in the transfer request 1338 * If there is less than 1K remaining in the transfer request
1339 * copy just the data for the transfer */ 1339 * copy just the data for the transfer */
1340 if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { 1340 if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
1341 status = scic_sds_stp_request_pio_data_in_copy_data_buffer( 1341 status = sci_stp_request_pio_data_in_copy_data_buffer(
1342 stp_req, data_buffer, stp_req->pio_len); 1342 stp_req, data_buffer, stp_req->pio_len);
1343 1343
1344 if (status == SCI_SUCCESS) 1344 if (status == SCI_SUCCESS)
1345 stp_req->pio_len = 0; 1345 stp_req->pio_len = 0;
1346 } else { 1346 } else {
1347 /* We are transfering the whole frame so copy */ 1347 /* We are transfering the whole frame so copy */
1348 status = scic_sds_stp_request_pio_data_in_copy_data_buffer( 1348 status = sci_stp_request_pio_data_in_copy_data_buffer(
1349 stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); 1349 stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1350 1350
1351 if (status == SCI_SUCCESS) 1351 if (status == SCI_SUCCESS)
@@ -1363,7 +1363,7 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1363 1363
1364 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1364 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1365 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1365 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1366 scic_sds_request_set_status(ireq, 1366 sci_request_set_status(ireq,
1367 SCU_TASK_DONE_GOOD, 1367 SCU_TASK_DONE_GOOD,
1368 SCI_SUCCESS); 1368 SCI_SUCCESS);
1369 1369
@@ -1375,7 +1375,7 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1375 * complete. If a NAK was received, then it is up to 1375 * complete. If a NAK was received, then it is up to
1376 * the user to retry the request. 1376 * the user to retry the request.
1377 */ 1377 */
1378 scic_sds_request_set_status(ireq, 1378 sci_request_set_status(ireq,
1379 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1379 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1380 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1380 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1381 1381
@@ -1398,7 +1398,7 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1398 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1398 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1399 /* Transmit data */ 1399 /* Transmit data */
1400 if (stp_req->pio_len != 0) { 1400 if (stp_req->pio_len != 0) {
1401 status = scic_sds_stp_request_pio_data_out_transmit_data(ireq); 1401 status = sci_stp_request_pio_data_out_transmit_data(ireq);
1402 if (status == SCI_SUCCESS) { 1402 if (status == SCI_SUCCESS) {
1403 if (stp_req->pio_len == 0) 1403 if (stp_req->pio_len == 0)
1404 all_frames_transferred = true; 1404 all_frames_transferred = true;
@@ -1426,7 +1426,7 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1426 * If a NAK was received, then it is up to the user to retry 1426 * If a NAK was received, then it is up to the user to retry
1427 * the request. 1427 * the request.
1428 */ 1428 */
1429 scic_sds_request_set_status( 1429 sci_request_set_status(
1430 ireq, 1430 ireq,
1431 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1431 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1432 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1432 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
@@ -1438,16 +1438,16 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1438 return status; 1438 return status;
1439} 1439}
1440 1440
1441static void scic_sds_stp_request_udma_complete_request( 1441static void sci_stp_request_udma_complete_request(
1442 struct isci_request *ireq, 1442 struct isci_request *ireq,
1443 u32 scu_status, 1443 u32 scu_status,
1444 enum sci_status sci_status) 1444 enum sci_status sci_status)
1445{ 1445{
1446 scic_sds_request_set_status(ireq, scu_status, sci_status); 1446 sci_request_set_status(ireq, scu_status, sci_status);
1447 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1447 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1448} 1448}
1449 1449
1450static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq, 1450static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1451 u32 frame_index) 1451 u32 frame_index)
1452{ 1452{
1453 struct isci_host *ihost = ireq->owning_controller; 1453 struct isci_host *ihost = ireq->owning_controller;
@@ -1455,28 +1455,28 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct is
1455 enum sci_status status; 1455 enum sci_status status;
1456 u32 *frame_buffer; 1456 u32 *frame_buffer;
1457 1457
1458 status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, 1458 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1459 frame_index, 1459 frame_index,
1460 (void **)&frame_header); 1460 (void **)&frame_header);
1461 1461
1462 if ((status == SCI_SUCCESS) && 1462 if ((status == SCI_SUCCESS) &&
1463 (frame_header->fis_type == FIS_REGD2H)) { 1463 (frame_header->fis_type == FIS_REGD2H)) {
1464 scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1464 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1465 frame_index, 1465 frame_index,
1466 (void **)&frame_buffer); 1466 (void **)&frame_buffer);
1467 1467
1468 scic_sds_controller_copy_sata_response(&ireq->stp.rsp, 1468 sci_controller_copy_sata_response(&ireq->stp.rsp,
1469 frame_header, 1469 frame_header,
1470 frame_buffer); 1470 frame_buffer);
1471 } 1471 }
1472 1472
1473 scic_sds_controller_release_frame(ihost, frame_index); 1473 sci_controller_release_frame(ihost, frame_index);
1474 1474
1475 return status; 1475 return status;
1476} 1476}
1477 1477
1478enum sci_status 1478enum sci_status
1479scic_sds_io_request_frame_handler(struct isci_request *ireq, 1479sci_io_request_frame_handler(struct isci_request *ireq,
1480 u32 frame_index) 1480 u32 frame_index)
1481{ 1481{
1482 struct isci_host *ihost = ireq->owning_controller; 1482 struct isci_host *ihost = ireq->owning_controller;
@@ -1491,7 +1491,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1491 struct ssp_frame_hdr ssp_hdr; 1491 struct ssp_frame_hdr ssp_hdr;
1492 void *frame_header; 1492 void *frame_header;
1493 1493
1494 scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, 1494 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1495 frame_index, 1495 frame_index,
1496 &frame_header); 1496 &frame_header);
1497 1497
@@ -1502,7 +1502,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1502 struct ssp_response_iu *resp_iu; 1502 struct ssp_response_iu *resp_iu;
1503 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1503 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1504 1504
1505 scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1505 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1506 frame_index, 1506 frame_index,
1507 (void **)&resp_iu); 1507 (void **)&resp_iu);
1508 1508
@@ -1512,11 +1512,11 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1512 1512
1513 if (resp_iu->datapres == 0x01 || 1513 if (resp_iu->datapres == 0x01 ||
1514 resp_iu->datapres == 0x02) { 1514 resp_iu->datapres == 0x02) {
1515 scic_sds_request_set_status(ireq, 1515 sci_request_set_status(ireq,
1516 SCU_TASK_DONE_CHECK_RESPONSE, 1516 SCU_TASK_DONE_CHECK_RESPONSE,
1517 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1517 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1518 } else 1518 } else
1519 scic_sds_request_set_status(ireq, 1519 sci_request_set_status(ireq,
1520 SCU_TASK_DONE_GOOD, 1520 SCU_TASK_DONE_GOOD,
1521 SCI_SUCCESS); 1521 SCI_SUCCESS);
1522 } else { 1522 } else {
@@ -1531,22 +1531,22 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1531 * In any case we are done with this frame buffer return it to 1531 * In any case we are done with this frame buffer return it to
1532 * the controller 1532 * the controller
1533 */ 1533 */
1534 scic_sds_controller_release_frame(ihost, frame_index); 1534 sci_controller_release_frame(ihost, frame_index);
1535 1535
1536 return SCI_SUCCESS; 1536 return SCI_SUCCESS;
1537 } 1537 }
1538 1538
1539 case SCI_REQ_TASK_WAIT_TC_RESP: 1539 case SCI_REQ_TASK_WAIT_TC_RESP:
1540 scic_sds_io_request_copy_response(ireq); 1540 sci_io_request_copy_response(ireq);
1541 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1541 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1542 scic_sds_controller_release_frame(ihost,frame_index); 1542 sci_controller_release_frame(ihost, frame_index);
1543 return SCI_SUCCESS; 1543 return SCI_SUCCESS;
1544 1544
1545 case SCI_REQ_SMP_WAIT_RESP: { 1545 case SCI_REQ_SMP_WAIT_RESP: {
1546 struct smp_resp *rsp_hdr = &ireq->smp.rsp; 1546 struct smp_resp *rsp_hdr = &ireq->smp.rsp;
1547 void *frame_header; 1547 void *frame_header;
1548 1548
1549 scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, 1549 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1550 frame_index, 1550 frame_index,
1551 &frame_header); 1551 &frame_header);
1552 1552
@@ -1557,7 +1557,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1557 if (rsp_hdr->frame_type == SMP_RESPONSE) { 1557 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1558 void *smp_resp; 1558 void *smp_resp;
1559 1559
1560 scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1560 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1561 frame_index, 1561 frame_index,
1562 &smp_resp); 1562 &smp_resp);
1563 1563
@@ -1567,7 +1567,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1567 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, 1567 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1568 smp_resp, word_cnt); 1568 smp_resp, word_cnt);
1569 1569
1570 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, 1570 sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1571 SCI_SUCCESS); 1571 SCI_SUCCESS);
1572 1572
1573 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); 1573 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
@@ -1584,31 +1584,31 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1584 frame_index, 1584 frame_index,
1585 rsp_hdr->frame_type); 1585 rsp_hdr->frame_type);
1586 1586
1587 scic_sds_request_set_status(ireq, 1587 sci_request_set_status(ireq,
1588 SCU_TASK_DONE_SMP_FRM_TYPE_ERR, 1588 SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1589 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1589 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1590 1590
1591 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1591 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1592 } 1592 }
1593 1593
1594 scic_sds_controller_release_frame(ihost, frame_index); 1594 sci_controller_release_frame(ihost, frame_index);
1595 1595
1596 return SCI_SUCCESS; 1596 return SCI_SUCCESS;
1597 } 1597 }
1598 1598
1599 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 1599 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1600 return scic_sds_stp_request_udma_general_frame_handler(ireq, 1600 return sci_stp_request_udma_general_frame_handler(ireq,
1601 frame_index); 1601 frame_index);
1602 1602
1603 case SCI_REQ_STP_UDMA_WAIT_D2H: 1603 case SCI_REQ_STP_UDMA_WAIT_D2H:
1604 /* Use the general frame handler to copy the resposne data */ 1604 /* Use the general frame handler to copy the resposne data */
1605 status = scic_sds_stp_request_udma_general_frame_handler(ireq, 1605 status = sci_stp_request_udma_general_frame_handler(ireq,
1606 frame_index); 1606 frame_index);
1607 1607
1608 if (status != SCI_SUCCESS) 1608 if (status != SCI_SUCCESS)
1609 return status; 1609 return status;
1610 1610
1611 scic_sds_stp_request_udma_complete_request(ireq, 1611 sci_stp_request_udma_complete_request(ireq,
1612 SCU_TASK_DONE_CHECK_RESPONSE, 1612 SCU_TASK_DONE_CHECK_RESPONSE,
1613 SCI_FAILURE_IO_RESPONSE_VALID); 1613 SCI_FAILURE_IO_RESPONSE_VALID);
1614 1614
@@ -1618,7 +1618,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1618 struct dev_to_host_fis *frame_header; 1618 struct dev_to_host_fis *frame_header;
1619 u32 *frame_buffer; 1619 u32 *frame_buffer;
1620 1620
1621 status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, 1621 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1622 frame_index, 1622 frame_index,
1623 (void **)&frame_header); 1623 (void **)&frame_header);
1624 1624
@@ -1636,16 +1636,16 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1636 1636
1637 switch (frame_header->fis_type) { 1637 switch (frame_header->fis_type) {
1638 case FIS_REGD2H: 1638 case FIS_REGD2H:
1639 scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1639 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1640 frame_index, 1640 frame_index,
1641 (void **)&frame_buffer); 1641 (void **)&frame_buffer);
1642 1642
1643 scic_sds_controller_copy_sata_response(&ireq->stp.rsp, 1643 sci_controller_copy_sata_response(&ireq->stp.rsp,
1644 frame_header, 1644 frame_header,
1645 frame_buffer); 1645 frame_buffer);
1646 1646
1647 /* The command has completed with error */ 1647 /* The command has completed with error */
1648 scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, 1648 sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE,
1649 SCI_FAILURE_IO_RESPONSE_VALID); 1649 SCI_FAILURE_IO_RESPONSE_VALID);
1650 break; 1650 break;
1651 1651
@@ -1655,7 +1655,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1655 "violation occurred\n", __func__, stp_req, 1655 "violation occurred\n", __func__, stp_req,
1656 frame_index); 1656 frame_index);
1657 1657
1658 scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, 1658 sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS,
1659 SCI_FAILURE_PROTOCOL_VIOLATION); 1659 SCI_FAILURE_PROTOCOL_VIOLATION);
1660 break; 1660 break;
1661 } 1661 }
@@ -1663,7 +1663,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1663 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1663 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1664 1664
1665 /* Frame has been decoded return it to the controller */ 1665 /* Frame has been decoded return it to the controller */
1666 scic_sds_controller_release_frame(ihost, frame_index); 1666 sci_controller_release_frame(ihost, frame_index);
1667 1667
1668 return status; 1668 return status;
1669 } 1669 }
@@ -1673,7 +1673,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1673 struct dev_to_host_fis *frame_header; 1673 struct dev_to_host_fis *frame_header;
1674 u32 *frame_buffer; 1674 u32 *frame_buffer;
1675 1675
1676 status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, 1676 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1677 frame_index, 1677 frame_index,
1678 (void **)&frame_header); 1678 (void **)&frame_header);
1679 1679
@@ -1688,7 +1688,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1688 switch (frame_header->fis_type) { 1688 switch (frame_header->fis_type) {
1689 case FIS_PIO_SETUP: 1689 case FIS_PIO_SETUP:
1690 /* Get from the frame buffer the PIO Setup Data */ 1690 /* Get from the frame buffer the PIO Setup Data */
1691 scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1691 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1692 frame_index, 1692 frame_index,
1693 (void **)&frame_buffer); 1693 (void **)&frame_buffer);
1694 1694
@@ -1704,7 +1704,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1704 /* status: 4th byte in the 3rd dword */ 1704 /* status: 4th byte in the 3rd dword */
1705 stp_req->status = (frame_buffer[2] >> 24) & 0xff; 1705 stp_req->status = (frame_buffer[2] >> 24) & 0xff;
1706 1706
1707 scic_sds_controller_copy_sata_response(&ireq->stp.rsp, 1707 sci_controller_copy_sata_response(&ireq->stp.rsp,
1708 frame_header, 1708 frame_header,
1709 frame_buffer); 1709 frame_buffer);
1710 1710
@@ -1717,7 +1717,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1717 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); 1717 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1718 } else if (task->data_dir == DMA_TO_DEVICE) { 1718 } else if (task->data_dir == DMA_TO_DEVICE) {
1719 /* Transmit data */ 1719 /* Transmit data */
1720 status = scic_sds_stp_request_pio_data_out_transmit_data(ireq); 1720 status = sci_stp_request_pio_data_out_transmit_data(ireq);
1721 if (status != SCI_SUCCESS) 1721 if (status != SCI_SUCCESS)
1722 break; 1722 break;
1723 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); 1723 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
@@ -1745,15 +1745,15 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1745 break; 1745 break;
1746 } 1746 }
1747 1747
1748 scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1748 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1749 frame_index, 1749 frame_index,
1750 (void **)&frame_buffer); 1750 (void **)&frame_buffer);
1751 1751
1752 scic_sds_controller_copy_sata_response(&ireq->stp.req, 1752 sci_controller_copy_sata_response(&ireq->stp.req,
1753 frame_header, 1753 frame_header,
1754 frame_buffer); 1754 frame_buffer);
1755 1755
1756 scic_sds_request_set_status(ireq, 1756 sci_request_set_status(ireq,
1757 SCU_TASK_DONE_CHECK_RESPONSE, 1757 SCU_TASK_DONE_CHECK_RESPONSE,
1758 SCI_FAILURE_IO_RESPONSE_VALID); 1758 SCI_FAILURE_IO_RESPONSE_VALID);
1759 1759
@@ -1766,7 +1766,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1766 } 1766 }
1767 1767
1768 /* Frame is decoded return it to the controller */ 1768 /* Frame is decoded return it to the controller */
1769 scic_sds_controller_release_frame(ihost, frame_index); 1769 sci_controller_release_frame(ihost, frame_index);
1770 1770
1771 return status; 1771 return status;
1772 } 1772 }
@@ -1775,7 +1775,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1775 struct dev_to_host_fis *frame_header; 1775 struct dev_to_host_fis *frame_header;
1776 struct sata_fis_data *frame_buffer; 1776 struct sata_fis_data *frame_buffer;
1777 1777
1778 status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, 1778 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1779 frame_index, 1779 frame_index,
1780 (void **)&frame_header); 1780 (void **)&frame_header);
1781 1781
@@ -1800,14 +1800,14 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1800 frame_index, 1800 frame_index,
1801 frame_header->fis_type); 1801 frame_header->fis_type);
1802 1802
1803 scic_sds_request_set_status(ireq, 1803 sci_request_set_status(ireq,
1804 SCU_TASK_DONE_GOOD, 1804 SCU_TASK_DONE_GOOD,
1805 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); 1805 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
1806 1806
1807 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1807 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1808 1808
1809 /* Frame is decoded return it to the controller */ 1809 /* Frame is decoded return it to the controller */
1810 scic_sds_controller_release_frame(ihost, frame_index); 1810 sci_controller_release_frame(ihost, frame_index);
1811 return status; 1811 return status;
1812 } 1812 }
1813 1813
@@ -1815,15 +1815,15 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1815 ireq->saved_rx_frame_index = frame_index; 1815 ireq->saved_rx_frame_index = frame_index;
1816 stp_req->pio_len = 0; 1816 stp_req->pio_len = 0;
1817 } else { 1817 } else {
1818 scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1818 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1819 frame_index, 1819 frame_index,
1820 (void **)&frame_buffer); 1820 (void **)&frame_buffer);
1821 1821
1822 status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, 1822 status = sci_stp_request_pio_data_in_copy_data(stp_req,
1823 (u8 *)frame_buffer); 1823 (u8 *)frame_buffer);
1824 1824
1825 /* Frame is decoded return it to the controller */ 1825 /* Frame is decoded return it to the controller */
1826 scic_sds_controller_release_frame(ihost, frame_index); 1826 sci_controller_release_frame(ihost, frame_index);
1827 } 1827 }
1828 1828
1829 /* Check for the end of the transfer, are there more 1829 /* Check for the end of the transfer, are there more
@@ -1833,7 +1833,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1833 return status; 1833 return status;
1834 1834
1835 if ((stp_req->status & ATA_BUSY) == 0) { 1835 if ((stp_req->status & ATA_BUSY) == 0) {
1836 scic_sds_request_set_status(ireq, 1836 sci_request_set_status(ireq,
1837 SCU_TASK_DONE_CHECK_RESPONSE, 1837 SCU_TASK_DONE_CHECK_RESPONSE,
1838 SCI_FAILURE_IO_RESPONSE_VALID); 1838 SCI_FAILURE_IO_RESPONSE_VALID);
1839 1839
@@ -1848,7 +1848,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1848 struct dev_to_host_fis *frame_header; 1848 struct dev_to_host_fis *frame_header;
1849 u32 *frame_buffer; 1849 u32 *frame_buffer;
1850 1850
1851 status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, 1851 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1852 frame_index, 1852 frame_index,
1853 (void **)&frame_header); 1853 (void **)&frame_header);
1854 if (status != SCI_SUCCESS) { 1854 if (status != SCI_SUCCESS) {
@@ -1864,16 +1864,16 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1864 1864
1865 switch (frame_header->fis_type) { 1865 switch (frame_header->fis_type) {
1866 case FIS_REGD2H: 1866 case FIS_REGD2H:
1867 scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1867 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1868 frame_index, 1868 frame_index,
1869 (void **)&frame_buffer); 1869 (void **)&frame_buffer);
1870 1870
1871 scic_sds_controller_copy_sata_response(&ireq->stp.rsp, 1871 sci_controller_copy_sata_response(&ireq->stp.rsp,
1872 frame_header, 1872 frame_header,
1873 frame_buffer); 1873 frame_buffer);
1874 1874
1875 /* The command has completed with error */ 1875 /* The command has completed with error */
1876 scic_sds_request_set_status(ireq, 1876 sci_request_set_status(ireq,
1877 SCU_TASK_DONE_CHECK_RESPONSE, 1877 SCU_TASK_DONE_CHECK_RESPONSE,
1878 SCI_FAILURE_IO_RESPONSE_VALID); 1878 SCI_FAILURE_IO_RESPONSE_VALID);
1879 break; 1879 break;
@@ -1886,7 +1886,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1886 stp_req, 1886 stp_req,
1887 frame_index); 1887 frame_index);
1888 1888
1889 scic_sds_request_set_status(ireq, 1889 sci_request_set_status(ireq,
1890 SCU_TASK_DONE_UNEXP_FIS, 1890 SCU_TASK_DONE_UNEXP_FIS,
1891 SCI_FAILURE_PROTOCOL_VIOLATION); 1891 SCI_FAILURE_PROTOCOL_VIOLATION);
1892 break; 1892 break;
@@ -1895,7 +1895,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1895 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1895 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1896 1896
1897 /* Frame has been decoded return it to the controller */ 1897 /* Frame has been decoded return it to the controller */
1898 scic_sds_controller_release_frame(ihost, frame_index); 1898 sci_controller_release_frame(ihost, frame_index);
1899 1899
1900 return status; 1900 return status;
1901 } 1901 }
@@ -1904,7 +1904,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1904 * TODO: Is it even possible to get an unsolicited frame in the 1904 * TODO: Is it even possible to get an unsolicited frame in the
1905 * aborting state? 1905 * aborting state?
1906 */ 1906 */
1907 scic_sds_controller_release_frame(ihost, frame_index); 1907 sci_controller_release_frame(ihost, frame_index);
1908 return SCI_SUCCESS; 1908 return SCI_SUCCESS;
1909 1909
1910 default: 1910 default:
@@ -1915,7 +1915,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq,
1915 frame_index, 1915 frame_index,
1916 state); 1916 state);
1917 1917
1918 scic_sds_controller_release_frame(ihost, frame_index); 1918 sci_controller_release_frame(ihost, frame_index);
1919 return SCI_FAILURE_INVALID_STATE; 1919 return SCI_FAILURE_INVALID_STATE;
1920 } 1920 }
1921} 1921}
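
The hunks above all follow one rule inside sci_io_request_frame_handler: whatever the decode outcome, the unsolicited frame index is handed back to the controller before the handler returns. Below is a minimal, self-contained sketch of that contract; the pool type, sizes, and function names are invented for illustration and are not the isci driver's unsolicited-frame API.

#include <stdio.h>

enum status { STATUS_OK, STATUS_INVALID_PARAMETER };

struct frame_pool {
	unsigned char headers[16][32];	/* toy backing store for frame headers */
	int in_use[16];
};

static enum status get_header(struct frame_pool *pool, int index, void **hdr)
{
	if (index < 0 || index >= 16 || !pool->in_use[index])
		return STATUS_INVALID_PARAMETER;
	*hdr = pool->headers[index];
	return STATUS_OK;
}

static void release_frame(struct frame_pool *pool, int index)
{
	if (index >= 0 && index < 16)
		pool->in_use[index] = 0;	/* slot belongs to the controller again */
}

static enum status handle_frame(struct frame_pool *pool, int index)
{
	void *header;
	enum status status = get_header(pool, index, &header);

	if (status == STATUS_OK) {
		(void)header;	/* a real handler would inspect the FIS type here */
	}

	/* frame is decoded (or rejected): always return it to the controller */
	release_frame(pool, index);
	return status;
}

int main(void)
{
	struct frame_pool pool = { .in_use = { [3] = 1 } };

	printf("frame 3 -> %d, frame 5 -> %d\n",
	       handle_frame(&pool, 3), handle_frame(&pool, 5));
	return 0;
}

Releasing on every exit path, including the error and aborting cases, is what keeps the controller's frame slots from leaking.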
@@ -1927,7 +1927,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
1927 1927
1928 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1928 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1929 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1929 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1930 scic_sds_stp_request_udma_complete_request(ireq, 1930 sci_stp_request_udma_complete_request(ireq,
1931 SCU_TASK_DONE_GOOD, 1931 SCU_TASK_DONE_GOOD,
1932 SCI_SUCCESS); 1932 SCI_SUCCESS);
1933 break; 1933 break;
@@ -1938,10 +1938,10 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
1938 * completion. 1938 * completion.
1939 */ 1939 */
1940 if (ireq->stp.rsp.fis_type == FIS_REGD2H) { 1940 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
1941 scic_sds_remote_device_suspend(ireq->target_device, 1941 sci_remote_device_suspend(ireq->target_device,
1942 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 1942 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1943 1943
1944 scic_sds_stp_request_udma_complete_request(ireq, 1944 sci_stp_request_udma_complete_request(ireq,
1945 SCU_TASK_DONE_CHECK_RESPONSE, 1945 SCU_TASK_DONE_CHECK_RESPONSE,
1946 SCI_FAILURE_IO_RESPONSE_VALID); 1946 SCI_FAILURE_IO_RESPONSE_VALID);
1947 } else { 1947 } else {
@@ -1965,12 +1965,12 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
1965 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): 1965 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1966 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): 1966 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1967 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): 1967 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
1968 scic_sds_remote_device_suspend(ireq->target_device, 1968 sci_remote_device_suspend(ireq->target_device,
1969 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 1969 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1970 /* Fall through to the default case */ 1970 /* Fall through to the default case */
1971 default: 1971 default:
1972 /* All other completion status cause the IO to be complete. */ 1972 /* All other completion status cause the IO to be complete. */
1973 scic_sds_stp_request_udma_complete_request(ireq, 1973 sci_stp_request_udma_complete_request(ireq,
1974 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1974 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1975 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1975 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1976 break; 1976 break;
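
Each of the *_tc_event handlers in these hunks has the same shape: extract the transport-layer status field from the raw completion code, treat TASK_DONE_GOOD as success, give a small set of statuses special handling, and fold everything else into a controller-specific error. The sketch below mirrors only that shape; the bit layout, macro names, and status values are assumptions made for the example, not the SCU hardware definitions.

#include <stdio.h>

/* Assumed field layout: transport-layer status in bits 31:16 of the code. */
#define TL_STATUS_SHIFT		16
#define TL_STATUS_MASK		0xffffu
#define GET_TL_STATUS(code)	(((code) >> TL_STATUS_SHIFT) & TL_STATUS_MASK)
#define MAKE_COMPLETION(tl)	((unsigned int)(tl) << TL_STATUS_SHIFT)

enum tl_status { TASK_DONE_GOOD, TASK_DONE_CHECK_RESPONSE, TASK_DONE_CRC_ERR };
enum io_status { IO_SUCCESS, IO_RESPONSE_VALID, IO_CONTROLLER_SPECIFIC_ERR };

static enum io_status complete_request(unsigned int completion_code)
{
	switch (GET_TL_STATUS(completion_code)) {
	case TASK_DONE_GOOD:
		/* normal completion: report success upward */
		return IO_SUCCESS;
	case TASK_DONE_CHECK_RESPONSE:
		/* the device returned status; the caller must inspect the response */
		return IO_RESPONSE_VALID;
	default:
		/* every other status is folded into a controller-specific error */
		return IO_CONTROLLER_SPECIFIC_ERR;
	}
}

int main(void)
{
	printf("good -> %d, crc error -> %d\n",
	       complete_request(MAKE_COMPLETION(TASK_DONE_GOOD)),
	       complete_request(MAKE_COMPLETION(TASK_DONE_CRC_ERR)));
	return 0;
}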
@@ -1985,7 +1985,7 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
1985{ 1985{
1986 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1986 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1987 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1987 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1988 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, 1988 sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1989 SCI_SUCCESS); 1989 SCI_SUCCESS);
1990 1990
1991 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); 1991 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
@@ -1997,7 +1997,7 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
1997 * If a NAK was received, then it is up to the user to retry 1997 * If a NAK was received, then it is up to the user to retry
1998 * the request. 1998 * the request.
1999 */ 1999 */
2000 scic_sds_request_set_status(ireq, 2000 sci_request_set_status(ireq,
2001 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 2001 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2002 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 2002 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2003 2003
@@ -2014,7 +2014,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2014{ 2014{
2015 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2015 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2016 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2016 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2017 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, 2017 sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
2018 SCI_SUCCESS); 2018 SCI_SUCCESS);
2019 2019
2020 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); 2020 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
@@ -2025,7 +2025,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2025 * a NAK was received, then it is up to the user to retry the 2025 * a NAK was received, then it is up to the user to retry the
2026 * request. 2026 * request.
2027 */ 2027 */
2028 scic_sds_request_set_status(ireq, 2028 sci_request_set_status(ireq,
2029 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 2029 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2030 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 2030 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2031 2031
@@ -2037,7 +2037,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2037} 2037}
2038 2038
2039enum sci_status 2039enum sci_status
2040scic_sds_io_request_tc_completion(struct isci_request *ireq, 2040sci_io_request_tc_completion(struct isci_request *ireq,
2041 u32 completion_code) 2041 u32 completion_code)
2042{ 2042{
2043 enum sci_base_request_states state; 2043 enum sci_base_request_states state;
@@ -2832,7 +2832,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
2832 ); 2832 );
2833 2833
2834 /* complete the io request to the core. */ 2834 /* complete the io request to the core. */
2835 scic_controller_complete_io(ihost, request->target_device, request); 2835 sci_controller_complete_io(ihost, request->target_device, request);
2836 isci_put_device(idev); 2836 isci_put_device(idev);
2837 2837
2838 /* set terminated handle so it cannot be completed or 2838 /* set terminated handle so it cannot be completed or
@@ -2842,7 +2842,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
2842 set_bit(IREQ_TERMINATED, &request->flags); 2842 set_bit(IREQ_TERMINATED, &request->flags);
2843} 2843}
2844 2844
2845static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) 2845static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2846{ 2846{
2847 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2847 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2848 struct domain_device *dev = ireq->target_device->domain_dev; 2848 struct domain_device *dev = ireq->target_device->domain_dev;
@@ -2879,7 +2879,7 @@ static void scic_sds_request_started_state_enter(struct sci_base_state_machine *
2879 } 2879 }
2880} 2880}
2881 2881
2882static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) 2882static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
2883{ 2883{
2884 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2884 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2885 struct isci_host *ihost = ireq->owning_controller; 2885 struct isci_host *ihost = ireq->owning_controller;
@@ -2892,7 +2892,7 @@ static void scic_sds_request_completed_state_enter(struct sci_base_state_machine
2892 isci_task_request_complete(ihost, ireq, ireq->sci_status); 2892 isci_task_request_complete(ihost, ireq, ireq->sci_status);
2893} 2893}
2894 2894
2895static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm) 2895static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
2896{ 2896{
2897 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2897 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2898 2898
@@ -2900,31 +2900,31 @@ static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine
2900 ireq->tc->abort = 1; 2900 ireq->tc->abort = 1;
2901} 2901}
2902 2902
2903static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) 2903static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2904{ 2904{
2905 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2905 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2906 2906
2907 scic_sds_remote_device_set_working_request(ireq->target_device, 2907 sci_remote_device_set_working_request(ireq->target_device,
2908 ireq); 2908 ireq);
2909} 2909}
2910 2910
2911static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) 2911static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2912{ 2912{
2913 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2913 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2914 2914
2915 scic_sds_remote_device_set_working_request(ireq->target_device, 2915 sci_remote_device_set_working_request(ireq->target_device,
2916 ireq); 2916 ireq);
2917} 2917}
2918 2918
2919static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) 2919static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
2920{ 2920{
2921 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2921 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2922 2922
2923 scic_sds_remote_device_set_working_request(ireq->target_device, 2923 sci_remote_device_set_working_request(ireq->target_device,
2924 ireq); 2924 ireq);
2925} 2925}
2926 2926
2927static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) 2927static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
2928{ 2928{
2929 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2929 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2930 struct scu_task_context *tc = ireq->tc; 2930 struct scu_task_context *tc = ireq->tc;
@@ -2938,22 +2938,22 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_complet
2938 /* Clear the TC control bit */ 2938 /* Clear the TC control bit */
2939 tc->control_frame = 0; 2939 tc->control_frame = 0;
2940 2940
2941 status = scic_controller_continue_io(ireq); 2941 status = sci_controller_continue_io(ireq);
2942 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); 2942 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
2943} 2943}
2944 2944
2945static const struct sci_base_state scic_sds_request_state_table[] = { 2945static const struct sci_base_state sci_request_state_table[] = {
2946 [SCI_REQ_INIT] = { }, 2946 [SCI_REQ_INIT] = { },
2947 [SCI_REQ_CONSTRUCTED] = { }, 2947 [SCI_REQ_CONSTRUCTED] = { },
2948 [SCI_REQ_STARTED] = { 2948 [SCI_REQ_STARTED] = {
2949 .enter_state = scic_sds_request_started_state_enter, 2949 .enter_state = sci_request_started_state_enter,
2950 }, 2950 },
2951 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { 2951 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
2952 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, 2952 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
2953 }, 2953 },
2954 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, 2954 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
2955 [SCI_REQ_STP_PIO_WAIT_H2D] = { 2955 [SCI_REQ_STP_PIO_WAIT_H2D] = {
2956 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, 2956 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
2957 }, 2957 },
2958 [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, 2958 [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
2959 [SCI_REQ_STP_PIO_DATA_IN] = { }, 2959 [SCI_REQ_STP_PIO_DATA_IN] = { },
@@ -2961,10 +2961,10 @@ static const struct sci_base_state scic_sds_request_state_table[] = {
2961 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, 2961 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
2962 [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, 2962 [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
2963 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = { 2963 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
2964 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, 2964 .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
2965 }, 2965 },
2966 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = { 2966 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
2967 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, 2967 .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
2968 }, 2968 },
2969 [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { }, 2969 [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
2970 [SCI_REQ_TASK_WAIT_TC_COMP] = { }, 2970 [SCI_REQ_TASK_WAIT_TC_COMP] = { },
@@ -2972,20 +2972,20 @@ static const struct sci_base_state scic_sds_request_state_table[] = {
2972 [SCI_REQ_SMP_WAIT_RESP] = { }, 2972 [SCI_REQ_SMP_WAIT_RESP] = { },
2973 [SCI_REQ_SMP_WAIT_TC_COMP] = { }, 2973 [SCI_REQ_SMP_WAIT_TC_COMP] = { },
2974 [SCI_REQ_COMPLETED] = { 2974 [SCI_REQ_COMPLETED] = {
2975 .enter_state = scic_sds_request_completed_state_enter, 2975 .enter_state = sci_request_completed_state_enter,
2976 }, 2976 },
2977 [SCI_REQ_ABORTING] = { 2977 [SCI_REQ_ABORTING] = {
2978 .enter_state = scic_sds_request_aborting_state_enter, 2978 .enter_state = sci_request_aborting_state_enter,
2979 }, 2979 },
2980 [SCI_REQ_FINAL] = { }, 2980 [SCI_REQ_FINAL] = { },
2981}; 2981};
2982 2982
2983static void 2983static void
2984scic_sds_general_request_construct(struct isci_host *ihost, 2984sci_general_request_construct(struct isci_host *ihost,
2985 struct isci_remote_device *idev, 2985 struct isci_remote_device *idev,
2986 struct isci_request *ireq) 2986 struct isci_request *ireq)
2987{ 2987{
2988 sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT); 2988 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
2989 2989
2990 ireq->target_device = idev; 2990 ireq->target_device = idev;
2991 ireq->protocol = SCIC_NO_PROTOCOL; 2991 ireq->protocol = SCIC_NO_PROTOCOL;
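
The state table and sci_general_request_construct() above form a compact table-driven state machine: an array indexed by state, an optional enter_state callback per entry, container_of() to get from the embedded state machine back to the owning request, and an init call that places the machine in SCI_REQ_INIT. A stand-alone sketch of that pattern follows; the struct and function names are simplified stand-ins, not the driver's real sci_base_state_machine types.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum req_state { REQ_INIT, REQ_STARTED, REQ_COMPLETED, REQ_STATE_COUNT };

struct state_machine;

struct base_state {
	void (*enter_state)(struct state_machine *sm);	/* optional; may be NULL */
};

struct state_machine {
	enum req_state current;
	const struct base_state *table;
};

struct request {
	int io_tag;
	struct state_machine sm;
};

static void change_state(struct state_machine *sm, enum req_state next)
{
	sm->current = next;
	if (sm->table[next].enter_state)
		sm->table[next].enter_state(sm);
}

static void init_sm(struct state_machine *sm, const struct base_state *table,
		    enum req_state initial)
{
	sm->table = table;
	change_state(sm, initial);
}

static void started_enter(struct state_machine *sm)
{
	struct request *ireq = container_of(sm, struct request, sm);

	printf("request %d entered STARTED\n", ireq->io_tag);
}

static const struct base_state req_state_table[REQ_STATE_COUNT] = {
	[REQ_INIT]	= { },
	[REQ_STARTED]	= { .enter_state = started_enter },
	[REQ_COMPLETED]	= { },
};

int main(void)
{
	struct request ireq = { .io_tag = 7 };

	init_sm(&ireq.sm, req_state_table, REQ_INIT);	/* like sci_init_sm(..., SCI_REQ_INIT) */
	change_state(&ireq.sm, REQ_STARTED);
	return 0;
}

Keeping the hooks in a table is what lets most states stay as empty entries while only the states that need side effects (STARTED, the STP wait states, COMPLETED, ABORTING) register an enter callback.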
@@ -2997,7 +2997,7 @@ scic_sds_general_request_construct(struct isci_host *ihost,
2997} 2997}
2998 2998
2999static enum sci_status 2999static enum sci_status
3000scic_io_request_construct(struct isci_host *ihost, 3000sci_io_request_construct(struct isci_host *ihost,
3001 struct isci_remote_device *idev, 3001 struct isci_remote_device *idev,
3002 struct isci_request *ireq) 3002 struct isci_request *ireq)
3003{ 3003{
@@ -3005,7 +3005,7 @@ scic_io_request_construct(struct isci_host *ihost,
3005 enum sci_status status = SCI_SUCCESS; 3005 enum sci_status status = SCI_SUCCESS;
3006 3006
3007 /* Build the common part of the request */ 3007 /* Build the common part of the request */
3008 scic_sds_general_request_construct(ihost, idev, ireq); 3008 sci_general_request_construct(ihost, idev, ireq);
3009 3009
3010 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 3010 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3011 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 3011 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
@@ -3024,7 +3024,7 @@ scic_io_request_construct(struct isci_host *ihost,
3024 return status; 3024 return status;
3025} 3025}
3026 3026
3027enum sci_status scic_task_request_construct(struct isci_host *ihost, 3027enum sci_status sci_task_request_construct(struct isci_host *ihost,
3028 struct isci_remote_device *idev, 3028 struct isci_remote_device *idev,
3029 u16 io_tag, struct isci_request *ireq) 3029 u16 io_tag, struct isci_request *ireq)
3030{ 3030{
@@ -3032,7 +3032,7 @@ enum sci_status scic_task_request_construct(struct isci_host *ihost,
3032 enum sci_status status = SCI_SUCCESS; 3032 enum sci_status status = SCI_SUCCESS;
3033 3033
3034 /* Build the common part of the request */ 3034 /* Build the common part of the request */
3035 scic_sds_general_request_construct(ihost, idev, ireq); 3035 sci_general_request_construct(ihost, idev, ireq);
3036 3036
3037 if (dev->dev_type == SAS_END_DEV || 3037 if (dev->dev_type == SAS_END_DEV ||
3038 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 3038 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
@@ -3053,7 +3053,7 @@ static enum sci_status isci_request_ssp_request_construct(
3053 "%s: request = %p\n", 3053 "%s: request = %p\n",
3054 __func__, 3054 __func__,
3055 request); 3055 request);
3056 status = scic_io_request_construct_basic_ssp(request); 3056 status = sci_io_request_construct_basic_ssp(request);
3057 return status; 3057 return status;
3058} 3058}
3059 3059
@@ -3074,7 +3074,7 @@ static enum sci_status isci_request_stp_request_construct(
3074 */ 3074 */
3075 register_fis = isci_sata_task_to_fis_copy(task); 3075 register_fis = isci_sata_task_to_fis_copy(task);
3076 3076
3077 status = scic_io_request_construct_basic_sata(request); 3077 status = sci_io_request_construct_basic_sata(request);
3078 3078
3079 /* Set the ncq tag in the fis, from the queue 3079 /* Set the ncq tag in the fis, from the queue
3080 * command in the task. 3080 * command in the task.
@@ -3091,7 +3091,7 @@ static enum sci_status isci_request_stp_request_construct(
3091} 3091}
3092 3092
3093static enum sci_status 3093static enum sci_status
3094scic_io_request_construct_smp(struct device *dev, 3094sci_io_request_construct_smp(struct device *dev,
3095 struct isci_request *ireq, 3095 struct isci_request *ireq,
3096 struct sas_task *task) 3096 struct sas_task *task)
3097{ 3097{
@@ -3141,8 +3141,8 @@ scic_io_request_construct_smp(struct device *dev,
3141 3141
3142 task_context = ireq->tc; 3142 task_context = ireq->tc;
3143 3143
3144 idev = scic_sds_request_get_device(ireq); 3144 idev = sci_request_get_device(ireq);
3145 iport = scic_sds_request_get_port(ireq); 3145 iport = sci_request_get_port(ireq);
3146 3146
3147 /* 3147 /*
3148 * Fill in the TC with the its required data 3148 * Fill in the TC with the its required data
@@ -3152,8 +3152,8 @@ scic_io_request_construct_smp(struct device *dev,
3152 task_context->initiator_request = 1; 3152 task_context->initiator_request = 1;
3153 task_context->connection_rate = idev->connection_rate; 3153 task_context->connection_rate = idev->connection_rate;
3154 task_context->protocol_engine_index = 3154 task_context->protocol_engine_index =
3155 scic_sds_controller_get_protocol_engine_group(ihost); 3155 sci_controller_get_protocol_engine_group(ihost);
3156 task_context->logical_port_index = scic_sds_port_get_index(iport); 3156 task_context->logical_port_index = sci_port_get_index(iport);
3157 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; 3157 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3158 task_context->abort = 0; 3158 task_context->abort = 0;
3159 task_context->valid = SCU_TASK_CONTEXT_VALID; 3159 task_context->valid = SCU_TASK_CONTEXT_VALID;
@@ -3195,9 +3195,9 @@ scic_io_request_construct_smp(struct device *dev,
3195 task_context->task_phase = 0; 3195 task_context->task_phase = 0;
3196 3196
3197 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 3197 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3198 (scic_sds_controller_get_protocol_engine_group(ihost) << 3198 (sci_controller_get_protocol_engine_group(ihost) <<
3199 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 3199 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3200 (scic_sds_port_get_index(iport) << 3200 (sci_port_get_index(iport) <<
3201 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 3201 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3202 ISCI_TAG_TCI(ireq->io_tag)); 3202 ISCI_TAG_TCI(ireq->io_tag));
3203 /* 3203 /*
@@ -3229,7 +3229,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3229 struct device *dev = &ireq->isci_host->pdev->dev; 3229 struct device *dev = &ireq->isci_host->pdev->dev;
3230 enum sci_status status = SCI_FAILURE; 3230 enum sci_status status = SCI_FAILURE;
3231 3231
3232 status = scic_io_request_construct_smp(dev, ireq, task); 3232 status = sci_io_request_construct_smp(dev, ireq, task);
3233 if (status != SCI_SUCCESS) 3233 if (status != SCI_SUCCESS)
3234 dev_warn(&ireq->isci_host->pdev->dev, 3234 dev_warn(&ireq->isci_host->pdev->dev,
3235 "%s: failed with status = %d\n", 3235 "%s: failed with status = %d\n",
@@ -3283,7 +3283,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost,
3283 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 3283 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3284 } 3284 }
3285 3285
3286 status = scic_io_request_construct(ihost, idev, request); 3286 status = sci_io_request_construct(ihost, idev, request);
3287 3287
3288 if (status != SCI_SUCCESS) { 3288 if (status != SCI_SUCCESS) {
3289 dev_warn(&ihost->pdev->dev, 3289 dev_warn(&ihost->pdev->dev,
@@ -3388,7 +3388,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
3388 * request was built that way (ie. 3388 * request was built that way (ie.
3389 * ireq->is_task_management_request is false). 3389 * ireq->is_task_management_request is false).
3390 */ 3390 */
3391 status = scic_controller_start_task(ihost, 3391 status = sci_controller_start_task(ihost,
3392 idev, 3392 idev,
3393 ireq); 3393 ireq);
3394 } else { 3394 } else {
@@ -3396,7 +3396,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
3396 } 3396 }
3397 } else { 3397 } else {
3398 /* send the request, let the core assign the IO TAG. */ 3398 /* send the request, let the core assign the IO TAG. */
3399 status = scic_controller_start_io(ihost, idev, 3399 status = sci_controller_start_io(ihost, idev,
3400 ireq); 3400 ireq);
3401 } 3401 }
3402 3402