aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2011-05-10 05:28:49 -0400
committerDan Williams <dan.j.williams@intel.com>2011-07-03 07:04:47 -0400
commit5dec6f4e41340196d223caf922578c44dfe2295a (patch)
treedf5c8395f8cbabd1f4b77200b7e339cfbcd72e11 /drivers/scsi
parentc72086e3c2897eaca5b99c005dd9844fdc784981 (diff)
isci: merge stp request substates into primary state machine
Remove usage of the request substate machine for stp requests, and kill the request substate infrastructure. Similar to the previous conversions this adds the substates to the primary state machine and arranges for the 'started' state to transition to the proper stp substate. Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/isci/Makefile1
-rw-r--r--drivers/scsi/isci/request.c1605
-rw-r--r--drivers/scsi/isci/request.h113
-rw-r--r--drivers/scsi/isci/stp_request.c1584
-rw-r--r--drivers/scsi/isci/stp_request.h195
5 files changed, 1571 insertions, 1927 deletions
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
index be2b67d0df89..86b0c88e91a6 100644
--- a/drivers/scsi/isci/Makefile
+++ b/drivers/scsi/isci/Makefile
@@ -6,5 +6,4 @@ isci-objs := init.o phy.o request.o sata.o \
6 remote_node_context.o \ 6 remote_node_context.o \
7 remote_node_table.o \ 7 remote_node_table.o \
8 unsolicited_frame_control.o \ 8 unsolicited_frame_control.o \
9 stp_request.o \
10 port_config.o \ 9 port_config.o \
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 5201dc58a191..f503e3e18d8f 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -58,6 +58,7 @@
58#include "request.h" 58#include "request.h"
59#include "sata.h" 59#include "sata.h"
60#include "scu_completion_codes.h" 60#include "scu_completion_codes.h"
61#include "scu_event_codes.h"
61#include "sas.h" 62#include "sas.h"
62 63
63/** 64/**
@@ -92,7 +93,7 @@ static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
92 * the Scatter-Gather List. 93 * the Scatter-Gather List.
93 * 94 *
94 */ 95 */
95void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) 96static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
96{ 97{
97 struct isci_request *isci_request = sci_req_to_ireq(sds_request); 98 struct isci_request *isci_request = sci_req_to_ireq(sds_request);
98 struct isci_host *isci_host = isci_request->isci_host; 99 struct isci_host *isci_host = isci_request->isci_host;
@@ -366,27 +367,214 @@ static void scu_ssp_task_request_construct_task_context(
366 sizeof(struct ssp_task_iu) / sizeof(u32); 367 sizeof(struct ssp_task_iu) / sizeof(u32);
367} 368}
368 369
370/**
371 * This method is will fill in the SCU Task Context for any type of SATA
372 * request. This is called from the various SATA constructors.
373 * @sci_req: The general IO request object which is to be used in
374 * constructing the SCU task context.
375 * @task_context: The buffer pointer for the SCU task context which is being
376 * constructed.
377 *
378 * The general io request construction is complete. The buffer assignment for
379 * the command buffer is complete. none Revisit task context construction to
380 * determine what is common for SSP/SMP/STP task context structures.
381 */
382static void scu_sata_reqeust_construct_task_context(
383 struct scic_sds_request *sci_req,
384 struct scu_task_context *task_context)
385{
386 dma_addr_t dma_addr;
387 struct scic_sds_controller *controller;
388 struct scic_sds_remote_device *target_device;
389 struct scic_sds_port *target_port;
390
391 controller = scic_sds_request_get_controller(sci_req);
392 target_device = scic_sds_request_get_device(sci_req);
393 target_port = scic_sds_request_get_port(sci_req);
394
395 /* Fill in the TC with the its required data */
396 task_context->abort = 0;
397 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
398 task_context->initiator_request = 1;
399 task_context->connection_rate = target_device->connection_rate;
400 task_context->protocol_engine_index =
401 scic_sds_controller_get_protocol_engine_group(controller);
402 task_context->logical_port_index =
403 scic_sds_port_get_index(target_port);
404 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
405 task_context->valid = SCU_TASK_CONTEXT_VALID;
406 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
407
408 task_context->remote_node_index =
409 scic_sds_remote_device_get_index(sci_req->target_device);
410 task_context->command_code = 0;
411
412 task_context->link_layer_control = 0;
413 task_context->do_not_dma_ssp_good_response = 1;
414 task_context->strict_ordering = 0;
415 task_context->control_frame = 0;
416 task_context->timeout_enable = 0;
417 task_context->block_guard_enable = 0;
418
419 task_context->address_modifier = 0;
420 task_context->task_phase = 0x01;
421
422 task_context->ssp_command_iu_length =
423 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
424
425 /* Set the first word of the H2D REG FIS */
426 task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
427
428 if (sci_req->was_tag_assigned_by_user) {
429 /*
430 * Build the task context now since we have already read
431 * the data
432 */
433 sci_req->post_context =
434 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
435 (scic_sds_controller_get_protocol_engine_group(
436 controller) <<
437 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
438 (scic_sds_port_get_index(target_port) <<
439 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
440 scic_sds_io_tag_get_index(sci_req->io_tag));
441 } else {
442 /*
443 * Build the task context now since we have already read
444 * the data.
445 * I/O tag index is not assigned because we have to wait
446 * until we get a TCi.
447 */
448 sci_req->post_context =
449 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
450 (scic_sds_controller_get_protocol_engine_group(
451 controller) <<
452 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
453 (scic_sds_port_get_index(target_port) <<
454 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
455 }
456
457 /*
458 * Copy the physical address for the command buffer to the SCU Task
459 * Context. We must offset the command buffer by 4 bytes because the
460 * first 4 bytes are transfered in the body of the TC.
461 */
462 dma_addr = scic_io_request_get_dma_addr(sci_req,
463 ((char *) &sci_req->stp.cmd) +
464 sizeof(u32));
465
466 task_context->command_iu_upper = upper_32_bits(dma_addr);
467 task_context->command_iu_lower = lower_32_bits(dma_addr);
468
469 /* SATA Requests do not have a response buffer */
470 task_context->response_iu_upper = 0;
471 task_context->response_iu_lower = 0;
472}
473
474
369 475
370/** 476/**
371 * This method constructs the SSP Command IU data for this ssp passthrough 477 * scu_stp_raw_request_construct_task_context -
372 * comand request object. 478 * @sci_req: This parameter specifies the STP request object for which to
373 * @sci_req: This parameter specifies the request object for which the SSP 479 * construct a RAW command frame task context.
374 * command information unit is being built. 480 * @task_context: This parameter specifies the SCU specific task context buffer
481 * to construct.
375 * 482 *
376 * enum sci_status, returns invalid parameter is cdb > 16 483 * This method performs the operations common to all SATA/STP requests
484 * utilizing the raw frame method. none
377 */ 485 */
486static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
487 struct scu_task_context *task_context)
488{
489 struct scic_sds_request *sci_req = to_sci_req(stp_req);
490
491 scu_sata_reqeust_construct_task_context(sci_req, task_context);
492
493 task_context->control_frame = 0;
494 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
495 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
496 task_context->type.stp.fis_type = FIS_REGH2D;
497 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
498}
499
500static enum sci_status
501scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
502 bool copy_rx_frame)
503{
504 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
505 struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;
506
507 scu_stp_raw_request_construct_task_context(stp_req,
508 sci_req->task_context_buffer);
509
510 pio->current_transfer_bytes = 0;
511 pio->ending_error = 0;
512 pio->ending_status = 0;
378 513
514 pio->request_current.sgl_offset = 0;
515 pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;
516
517 if (copy_rx_frame) {
518 scic_sds_request_build_sgl(sci_req);
519 /* Since the IO request copy of the TC contains the same data as
520 * the actual TC this pointer is vaild for either.
521 */
522 pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
523 } else {
524 /* The user does not want the data copied to the SGL buffer location */
525 pio->request_current.sgl_pair = NULL;
526 }
527
528 return SCI_SUCCESS;
529}
379 530
380/** 531/**
381 * This method constructs the SATA request object.
382 * @sci_req:
383 * @sat_protocol:
384 * @transfer_length:
385 * @data_direction:
386 * @copy_rx_frame:
387 * 532 *
388 * enum sci_status 533 * @sci_req: This parameter specifies the request to be constructed as an
534 * optimized request.
535 * @optimized_task_type: This parameter specifies whether the request is to be
536 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
537 * value of 1 indicates NCQ.
538 *
539 * This method will perform request construction common to all types of STP
540 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
541 * returns an indication as to whether the construction was successful.
389 */ 542 */
543static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
544 u8 optimized_task_type,
545 u32 len,
546 enum dma_data_direction dir)
547{
548 struct scu_task_context *task_context = sci_req->task_context_buffer;
549
550 /* Build the STP task context structure */
551 scu_sata_reqeust_construct_task_context(sci_req, task_context);
552
553 /* Copy over the SGL elements */
554 scic_sds_request_build_sgl(sci_req);
555
556 /* Copy over the number of bytes to be transfered */
557 task_context->transfer_length_bytes = len;
558
559 if (dir == DMA_TO_DEVICE) {
560 /*
561 * The difference between the DMA IN and DMA OUT request task type
562 * values are consistent with the difference between FPDMA READ
563 * and FPDMA WRITE values. Add the supplied task type parameter
564 * to this difference to set the task type properly for this
565 * DATA OUT (WRITE) case. */
566 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
567 - SCU_TASK_TYPE_DMA_IN);
568 } else {
569 /*
570 * For the DATA IN (READ) case, simply save the supplied
571 * optimized task type. */
572 task_context->task_type = optimized_task_type;
573 }
574}
575
576
577
390static enum sci_status 578static enum sci_status
391scic_io_request_construct_sata(struct scic_sds_request *sci_req, 579scic_io_request_construct_sata(struct scic_sds_request *sci_req,
392 u32 len, 580 u32 len,
@@ -402,9 +590,11 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req,
402 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 590 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
403 591
404 if (tmf->tmf_code == isci_tmf_sata_srst_high || 592 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
405 tmf->tmf_code == isci_tmf_sata_srst_low) 593 tmf->tmf_code == isci_tmf_sata_srst_low) {
406 return scic_sds_stp_soft_reset_request_construct(sci_req); 594 scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
407 else { 595 sci_req->task_context_buffer);
596 return SCI_SUCCESS;
597 } else {
408 dev_err(scic_to_dev(sci_req->owning_controller), 598 dev_err(scic_to_dev(sci_req->owning_controller),
409 "%s: Request 0x%p received un-handled SAT " 599 "%s: Request 0x%p received un-handled SAT "
410 "management protocol 0x%x.\n", 600 "management protocol 0x%x.\n",
@@ -424,17 +614,27 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req,
424 } 614 }
425 615
426 /* non data */ 616 /* non data */
427 if (task->data_dir == DMA_NONE) 617 if (task->data_dir == DMA_NONE) {
428 return scic_sds_stp_non_data_request_construct(sci_req); 618 scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
619 sci_req->task_context_buffer);
620 return SCI_SUCCESS;
621 }
429 622
430 /* NCQ */ 623 /* NCQ */
431 if (task->ata_task.use_ncq) 624 if (task->ata_task.use_ncq) {
432 return scic_sds_stp_ncq_request_construct(sci_req, len, dir); 625 scic_sds_stp_optimized_request_construct(sci_req,
626 SCU_TASK_TYPE_FPDMAQ_READ,
627 len, dir);
628 return SCI_SUCCESS;
629 }
433 630
434 /* DMA */ 631 /* DMA */
435 if (task->ata_task.dma_xfer) 632 if (task->ata_task.dma_xfer) {
436 return scic_sds_stp_udma_request_construct(sci_req, len, dir); 633 scic_sds_stp_optimized_request_construct(sci_req,
437 else /* PIO */ 634 SCU_TASK_TYPE_DMA_IN,
635 len, dir);
636 return SCI_SUCCESS;
637 } else /* PIO */
438 return scic_sds_stp_pio_request_construct(sci_req, copy); 638 return scic_sds_stp_pio_request_construct(sci_req, copy);
439 639
440 return status; 640 return status;
@@ -453,9 +653,8 @@ static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_reque
453 653
454 scic_sds_io_request_build_ssp_command_iu(sci_req); 654 scic_sds_io_request_build_ssp_command_iu(sci_req);
455 655
456 sci_base_state_machine_change_state( 656 sci_base_state_machine_change_state(&sci_req->state_machine,
457 &sci_req->state_machine, 657 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
458 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
459 658
460 return SCI_SUCCESS; 659 return SCI_SUCCESS;
461} 660}
@@ -470,12 +669,11 @@ enum sci_status scic_task_request_construct_ssp(
470 scic_sds_task_request_build_ssp_task_iu(sci_req); 669 scic_sds_task_request_build_ssp_task_iu(sci_req);
471 670
472 sci_base_state_machine_change_state(&sci_req->state_machine, 671 sci_base_state_machine_change_state(&sci_req->state_machine,
473 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 672 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
474 673
475 return SCI_SUCCESS; 674 return SCI_SUCCESS;
476} 675}
477 676
478
479static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req) 677static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
480{ 678{
481 enum sci_status status; 679 enum sci_status status;
@@ -496,12 +694,11 @@ static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_requ
496 694
497 if (status == SCI_SUCCESS) 695 if (status == SCI_SUCCESS)
498 sci_base_state_machine_change_state(&sci_req->state_machine, 696 sci_base_state_machine_change_state(&sci_req->state_machine,
499 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 697 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
500 698
501 return status; 699 return status;
502} 700}
503 701
504
505enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) 702enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
506{ 703{
507 enum sci_status status = SCI_SUCCESS; 704 enum sci_status status = SCI_SUCCESS;
@@ -513,7 +710,8 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re
513 710
514 if (tmf->tmf_code == isci_tmf_sata_srst_high || 711 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
515 tmf->tmf_code == isci_tmf_sata_srst_low) { 712 tmf->tmf_code == isci_tmf_sata_srst_low) {
516 status = scic_sds_stp_soft_reset_request_construct(sci_req); 713 scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
714 sci_req->task_context_buffer);
517 } else { 715 } else {
518 dev_err(scic_to_dev(sci_req->owning_controller), 716 dev_err(scic_to_dev(sci_req->owning_controller),
519 "%s: Request 0x%p received un-handled SAT " 717 "%s: Request 0x%p received un-handled SAT "
@@ -524,10 +722,10 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re
524 } 722 }
525 } 723 }
526 724
527 if (status == SCI_SUCCESS) 725 if (status != SCI_SUCCESS)
528 sci_base_state_machine_change_state( 726 return status;
529 &sci_req->state_machine, 727 sci_base_state_machine_change_state(&sci_req->state_machine,
530 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 728 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
531 729
532 return status; 730 return status;
533} 731}
@@ -724,7 +922,7 @@ static enum sci_status scic_sds_request_constructed_state_start_handler(
724 922
725 /* Everything is good go ahead and change state */ 923 /* Everything is good go ahead and change state */
726 sci_base_state_machine_change_state(&request->state_machine, 924 sci_base_state_machine_change_state(&request->state_machine,
727 SCI_BASE_REQUEST_STATE_STARTED); 925 SCI_BASE_REQUEST_STATE_STARTED);
728 926
729 return SCI_SUCCESS; 927 return SCI_SUCCESS;
730 } 928 }
@@ -749,29 +947,14 @@ static enum sci_status scic_sds_request_constructed_state_abort_handler(
749 SCI_FAILURE_IO_TERMINATED); 947 SCI_FAILURE_IO_TERMINATED);
750 948
751 sci_base_state_machine_change_state(&request->state_machine, 949 sci_base_state_machine_change_state(&request->state_machine,
752 SCI_BASE_REQUEST_STATE_COMPLETED); 950 SCI_BASE_REQUEST_STATE_COMPLETED);
753 return SCI_SUCCESS; 951 return SCI_SUCCESS;
754} 952}
755 953
756/* 954static enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req)
757 * *****************************************************************************
758 * * STARTED STATE HANDLERS
759 * ***************************************************************************** */
760
761/*
762 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
763 * object receives a scic_sds_request_terminate() request. Since the request
764 * has been posted to the hardware the io request state is changed to the
765 * aborting state. enum sci_status SCI_SUCCESS
766 */
767enum sci_status scic_sds_request_started_state_abort_handler(
768 struct scic_sds_request *request)
769{ 955{
770 if (request->has_started_substate_machine) 956 sci_base_state_machine_change_state(&sci_req->state_machine,
771 sci_base_state_machine_stop(&request->started_substate_machine); 957 SCI_BASE_REQUEST_STATE_ABORTING);
772
773 sci_base_state_machine_change_state(&request->state_machine,
774 SCI_BASE_REQUEST_STATE_ABORTING);
775 return SCI_SUCCESS; 958 return SCI_SUCCESS;
776} 959}
777 960
@@ -943,19 +1126,15 @@ scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sc
943 */ 1126 */
944 1127
945 /* In all cases we will treat this as the completion of the IO req. */ 1128 /* In all cases we will treat this as the completion of the IO req. */
946 sci_base_state_machine_change_state( 1129 sci_base_state_machine_change_state(&sci_req->state_machine,
947 &sci_req->state_machine, 1130 SCI_BASE_REQUEST_STATE_COMPLETED);
948 SCI_BASE_REQUEST_STATE_COMPLETED);
949 return SCI_SUCCESS; 1131 return SCI_SUCCESS;
950} 1132}
951 1133
952enum sci_status 1134enum sci_status
953scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code) 1135scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
954{ 1136{
955 if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED && 1137 if (request->state_handlers->tc_completion_handler)
956 request->has_started_substate_machine == false)
957 return scic_sds_request_started_state_tc_completion_handler(request, completion_code);
958 else if (request->state_handlers->tc_completion_handler)
959 return request->state_handlers->tc_completion_handler(request, completion_code); 1138 return request->state_handlers->tc_completion_handler(request, completion_code);
960 1139
961 dev_warn(scic_to_dev(request->owning_controller), 1140 dev_warn(scic_to_dev(request->owning_controller),
@@ -1064,7 +1243,7 @@ static enum sci_status scic_sds_request_completed_state_complete_handler(
1064 } 1243 }
1065 1244
1066 sci_base_state_machine_change_state(&request->state_machine, 1245 sci_base_state_machine_change_state(&request->state_machine,
1067 SCI_BASE_REQUEST_STATE_FINAL); 1246 SCI_BASE_REQUEST_STATE_FINAL);
1068 return SCI_SUCCESS; 1247 return SCI_SUCCESS;
1069} 1248}
1070 1249
@@ -1084,7 +1263,7 @@ static enum sci_status scic_sds_request_aborting_state_abort_handler(
1084 struct scic_sds_request *request) 1263 struct scic_sds_request *request)
1085{ 1264{
1086 sci_base_state_machine_change_state(&request->state_machine, 1265 sci_base_state_machine_change_state(&request->state_machine,
1087 SCI_BASE_REQUEST_STATE_COMPLETED); 1266 SCI_BASE_REQUEST_STATE_COMPLETED);
1088 return SCI_SUCCESS; 1267 return SCI_SUCCESS;
1089} 1268}
1090 1269
@@ -1107,7 +1286,7 @@ static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
1107 ); 1286 );
1108 1287
1109 sci_base_state_machine_change_state(&sci_req->state_machine, 1288 sci_base_state_machine_change_state(&sci_req->state_machine,
1110 SCI_BASE_REQUEST_STATE_COMPLETED); 1289 SCI_BASE_REQUEST_STATE_COMPLETED);
1111 break; 1290 break;
1112 1291
1113 default: 1292 default:
@@ -1161,7 +1340,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi
1161 SCI_SUCCESS); 1340 SCI_SUCCESS);
1162 1341
1163 sci_base_state_machine_change_state(&sci_req->state_machine, 1342 sci_base_state_machine_change_state(&sci_req->state_machine,
1164 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); 1343 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
1165 break; 1344 break;
1166 1345
1167 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1346 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
@@ -1178,7 +1357,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi
1178 completion_code); 1357 completion_code);
1179 1358
1180 sci_base_state_machine_change_state(&sci_req->state_machine, 1359 sci_base_state_machine_change_state(&sci_req->state_machine,
1181 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); 1360 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
1182 break; 1361 break;
1183 1362
1184 default: 1363 default:
@@ -1192,7 +1371,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi
1192 ); 1371 );
1193 1372
1194 sci_base_state_machine_change_state(&sci_req->state_machine, 1373 sci_base_state_machine_change_state(&sci_req->state_machine,
1195 SCI_BASE_REQUEST_STATE_COMPLETED); 1374 SCI_BASE_REQUEST_STATE_COMPLETED);
1196 break; 1375 break;
1197 } 1376 }
1198 1377
@@ -1215,9 +1394,9 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler
1215 struct scic_sds_request *request) 1394 struct scic_sds_request *request)
1216{ 1395{
1217 sci_base_state_machine_change_state(&request->state_machine, 1396 sci_base_state_machine_change_state(&request->state_machine,
1218 SCI_BASE_REQUEST_STATE_ABORTING); 1397 SCI_BASE_REQUEST_STATE_ABORTING);
1219 sci_base_state_machine_change_state(&request->state_machine, 1398 sci_base_state_machine_change_state(&request->state_machine,
1220 SCI_BASE_REQUEST_STATE_COMPLETED); 1399 SCI_BASE_REQUEST_STATE_COMPLETED);
1221 return SCI_SUCCESS; 1400 return SCI_SUCCESS;
1222} 1401}
1223 1402
@@ -1243,7 +1422,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler
1243 scic_sds_io_request_copy_response(request); 1422 scic_sds_io_request_copy_response(request);
1244 1423
1245 sci_base_state_machine_change_state(&request->state_machine, 1424 sci_base_state_machine_change_state(&request->state_machine,
1246 SCI_BASE_REQUEST_STATE_COMPLETED); 1425 SCI_BASE_REQUEST_STATE_COMPLETED);
1247 scic_sds_controller_release_frame(request->owning_controller, 1426 scic_sds_controller_release_frame(request->owning_controller,
1248 frame_index); 1427 frame_index);
1249 return SCI_SUCCESS; 1428 return SCI_SUCCESS;
@@ -1270,13 +1449,11 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler
1270 /* 1449 /*
1271 * In the AWAIT RESPONSE state, any TC completion is unexpected. 1450 * In the AWAIT RESPONSE state, any TC completion is unexpected.
1272 * but if the TC has success status, we complete the IO anyway. */ 1451 * but if the TC has success status, we complete the IO anyway. */
1273 scic_sds_request_set_status( 1452 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1274 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS 1453 SCI_SUCCESS);
1275 );
1276 1454
1277 sci_base_state_machine_change_state( 1455 sci_base_state_machine_change_state(&sci_req->state_machine,
1278 &sci_req->state_machine, 1456 SCI_BASE_REQUEST_STATE_COMPLETED);
1279 SCI_BASE_REQUEST_STATE_COMPLETED);
1280 break; 1457 break;
1281 1458
1282 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1459 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
@@ -1288,13 +1465,11 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler
1288 * is not able to send smp response within 2 ms. This causes our hardware 1465 * is not able to send smp response within 2 ms. This causes our hardware
1289 * break the connection and set TC completion with one of these SMP_XXX_XX_ERR 1466 * break the connection and set TC completion with one of these SMP_XXX_XX_ERR
1290 * status. For these type of error, we ask scic user to retry the request. */ 1467 * status. For these type of error, we ask scic user to retry the request. */
1291 scic_sds_request_set_status( 1468 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
1292 sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED 1469 SCI_FAILURE_RETRY_REQUIRED);
1293 );
1294 1470
1295 sci_base_state_machine_change_state( 1471 sci_base_state_machine_change_state(&sci_req->state_machine,
1296 &sci_req->state_machine, 1472 SCI_BASE_REQUEST_STATE_COMPLETED);
1297 SCI_BASE_REQUEST_STATE_COMPLETED);
1298 break; 1473 break;
1299 1474
1300 default: 1475 default:
@@ -1307,9 +1482,8 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler
1307 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR 1482 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1308 ); 1483 );
1309 1484
1310 sci_base_state_machine_change_state( 1485 sci_base_state_machine_change_state(&sci_req->state_machine,
1311 &sci_req->state_machine, 1486 SCI_BASE_REQUEST_STATE_COMPLETED);
1312 SCI_BASE_REQUEST_STATE_COMPLETED);
1313 break; 1487 break;
1314 } 1488 }
1315 1489
@@ -1365,7 +1539,7 @@ scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_r
1365 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); 1539 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
1366 1540
1367 sci_base_state_machine_change_state(&sci_req->state_machine, 1541 sci_base_state_machine_change_state(&sci_req->state_machine,
1368 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); 1542 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION);
1369 } else { 1543 } else {
1370 /* This was not a response frame why did it get forwarded? */ 1544 /* This was not a response frame why did it get forwarded? */
1371 dev_err(scic_to_dev(sci_req->owning_controller), 1545 dev_err(scic_to_dev(sci_req->owning_controller),
@@ -1381,9 +1555,8 @@ scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_r
1381 SCU_TASK_DONE_SMP_FRM_TYPE_ERR, 1555 SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1382 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1556 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1383 1557
1384 sci_base_state_machine_change_state( 1558 sci_base_state_machine_change_state(&sci_req->state_machine,
1385 &sci_req->state_machine, 1559 SCI_BASE_REQUEST_STATE_COMPLETED);
1386 SCI_BASE_REQUEST_STATE_COMPLETED);
1387 } 1560 }
1388 1561
1389 scic_sds_controller_release_frame(sci_req->owning_controller, 1562 scic_sds_controller_release_frame(sci_req->owning_controller,
@@ -1411,14 +1584,111 @@ static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_ha
1411{ 1584{
1412 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1585 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1413 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1586 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1587 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1588 SCI_SUCCESS);
1589
1590 sci_base_state_machine_change_state(&sci_req->state_machine,
1591 SCI_BASE_REQUEST_STATE_COMPLETED);
1592 break;
1593
1594 default:
1595 /*
1596 * All other completion status cause the IO to be complete. If a NAK
1597 * was received, then it is up to the user to retry the request. */
1414 scic_sds_request_set_status( 1598 scic_sds_request_set_status(
1415 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS 1599 sci_req,
1600 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1601 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1416 ); 1602 );
1417 1603
1418 sci_base_state_machine_change_state( 1604 sci_base_state_machine_change_state(
1419 &sci_req->state_machine, 1605 &sci_req->state_machine,
1420 SCI_BASE_REQUEST_STATE_COMPLETED); 1606 SCI_BASE_REQUEST_STATE_COMPLETED);
1421 break; 1607 break;
1608 }
1609
1610 return SCI_SUCCESS;
1611}
1612
1613void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
1614 u16 ncq_tag)
1615{
1616 /**
1617 * @note This could be made to return an error to the user if the user
1618 * attempts to set the NCQ tag in the wrong state.
1619 */
1620 req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
1621}
1622
1623/**
1624 *
1625 * @sci_req:
1626 *
1627 * Get the next SGL element from the request. - Check on which SGL element pair
1628 * we are working - if working on SLG pair element A - advance to element B -
1629 * else - check to see if there are more SGL element pairs for this IO request
1630 * - if there are more SGL element pairs - advance to the next pair and return
1631 * element A struct scu_sgl_element*
1632 */
1633static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
1634{
1635 struct scu_sgl_element *current_sgl;
1636 struct scic_sds_request *sci_req = to_sci_req(stp_req);
1637 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
1638
1639 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1640 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
1641 pio_sgl->sgl_pair->B.address_upper == 0) {
1642 current_sgl = NULL;
1643 } else {
1644 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
1645 current_sgl = &pio_sgl->sgl_pair->B;
1646 }
1647 } else {
1648 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
1649 pio_sgl->sgl_pair->next_pair_upper == 0) {
1650 current_sgl = NULL;
1651 } else {
1652 u64 phys_addr;
1653
1654 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
1655 phys_addr <<= 32;
1656 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
1657
1658 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
1659 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
1660 current_sgl = &pio_sgl->sgl_pair->A;
1661 }
1662 }
1663
1664 return current_sgl;
1665}
1666
1667/**
1668 *
1669 * @sci_req:
1670 * @completion_code:
1671 *
1672 * This method processes a TC completion. The expected TC completion is for
1673 * the transmission of the H2D register FIS containing the SATA/STP non-data
1674 * request. This method always successfully processes the TC completion.
1675 * SCI_SUCCESS This value is always returned.
1676 */
1677static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
1678 struct scic_sds_request *sci_req,
1679 u32 completion_code)
1680{
1681 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1682 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1683 scic_sds_request_set_status(
1684 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1685 );
1686
1687 sci_base_state_machine_change_state(
1688 &sci_req->state_machine,
1689 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
1690 );
1691 break;
1422 1692
1423 default: 1693 default:
1424 /* 1694 /*
@@ -1431,14 +1701,861 @@ static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_ha
1431 ); 1701 );
1432 1702
1433 sci_base_state_machine_change_state( 1703 sci_base_state_machine_change_state(
1704 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
1705 break;
1706 }
1707
1708 return SCI_SUCCESS;
1709}
1710
1711/**
1712 *
1713 * @request: This parameter specifies the request for which a frame has been
1714 * received.
1715 * @frame_index: This parameter specifies the index of the frame that has been
1716 * received.
1717 *
1718 * This method processes frames received from the target while waiting for a
1719 * device to host register FIS. If a non-register FIS is received during this
1720 * time, it is treated as a protocol violation from an IO perspective. Indicate
1721 * if the received frame was processed successfully.
1722 */
1723static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
1724 struct scic_sds_request *sci_req,
1725 u32 frame_index)
1726{
1727 enum sci_status status;
1728 struct dev_to_host_fis *frame_header;
1729 u32 *frame_buffer;
1730 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1731 struct scic_sds_controller *scic = sci_req->owning_controller;
1732
1733 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1734 frame_index,
1735 (void **)&frame_header);
1736
1737 if (status != SCI_SUCCESS) {
1738 dev_err(scic_to_dev(sci_req->owning_controller),
1739 "%s: SCIC IO Request 0x%p could not get frame header "
1740 "for frame index %d, status %x\n",
1741 __func__, stp_req, frame_index, status);
1742
1743 return status;
1744 }
1745
1746 switch (frame_header->fis_type) {
1747 case FIS_REGD2H:
1748 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1749 frame_index,
1750 (void **)&frame_buffer);
1751
1752 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1753 frame_header,
1754 frame_buffer);
1755
1756 /* The command has completed with error */
1757 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
1758 SCI_FAILURE_IO_RESPONSE_VALID);
1759 break;
1760
1761 default:
1762 dev_warn(scic_to_dev(scic),
1763 "%s: IO Request:0x%p Frame Id:%d protocol "
1764 "violation occurred\n", __func__, stp_req,
1765 frame_index);
1766
1767 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1768 SCI_FAILURE_PROTOCOL_VIOLATION);
1769 break;
1770 }
1771
1772 sci_base_state_machine_change_state(&sci_req->state_machine,
1773 SCI_BASE_REQUEST_STATE_COMPLETED);
1774
1775 /* Frame has been decoded return it to the controller */
1776 scic_sds_controller_release_frame(scic, frame_index);
1777
1778 return status;
1779}
1780
1781#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
1782
1783/* transmit DATA_FIS from (current sgl + offset) for input
1784 * parameter length. current sgl and offset is alreay stored in the IO request
1785 */
1786static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
1787 struct scic_sds_request *sci_req,
1788 u32 length)
1789{
1790 struct scic_sds_controller *scic = sci_req->owning_controller;
1791 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1792 struct scu_task_context *task_context;
1793 struct scu_sgl_element *current_sgl;
1794
1795 /* Recycle the TC and reconstruct it for sending out DATA FIS containing
1796 * for the data from current_sgl+offset for the input length
1797 */
1798 task_context = scic_sds_controller_get_task_context_buffer(scic,
1799 sci_req->io_tag);
1800
1801 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
1802 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
1803 else
1804 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
1805
1806 /* update the TC */
1807 task_context->command_iu_upper = current_sgl->address_upper;
1808 task_context->command_iu_lower = current_sgl->address_lower;
1809 task_context->transfer_length_bytes = length;
1810 task_context->type.stp.fis_type = FIS_DATA;
1811
1812 /* send the new TC out. */
1813 return scic_controller_continue_io(sci_req);
1814}
1815
1816static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
1817{
1818
1819 struct scu_sgl_element *current_sgl;
1820 u32 sgl_offset;
1821 u32 remaining_bytes_in_current_sgl = 0;
1822 enum sci_status status = SCI_SUCCESS;
1823 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1824
1825 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
1826
1827 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1828 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
1829 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
1830 } else {
1831 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
1832 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
1833 }
1834
1835
1836 if (stp_req->type.pio.pio_transfer_bytes > 0) {
1837 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
1838 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
1839 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
1840 if (status == SCI_SUCCESS) {
1841 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
1842
1843 /* update the current sgl, sgl_offset and save for future */
1844 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
1845 sgl_offset = 0;
1846 }
1847 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
1848 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
1849 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
1850
1851 if (status == SCI_SUCCESS) {
1852 /* Sgl offset will be adjusted and saved for future */
1853 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
1854 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
1855 stp_req->type.pio.pio_transfer_bytes = 0;
1856 }
1857 }
1858 }
1859
1860 if (status == SCI_SUCCESS) {
1861 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
1862 }
1863
1864 return status;
1865}
1866
/**
 * scic_sds_stp_request_pio_data_in_copy_data_buffer() - copy received PIO
 *    data into the request's scatter-gather region.
 * @stp_req: the request that is used for the SGL processing.
 * @data_buf: the buffer of received frame data to be copied.
 * @len: the length of the data transfer in bytes.
 *
 * Walks the sas_task scatterlist, kmapping each entry and copying from
 * @data_buf until @len bytes are consumed.  When the task has no scatter
 * entries (task->num_scatter == 0), task->scatter is treated as a plain
 * kernel buffer pointer and copied into directly.
 *
 * Return: SCI_SUCCESS (unconditionally).
 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
						  u8 *data_buf, u32 len)
{
	struct scic_sds_request *sci_req;
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	/* Recover the libsas task from the core request. */
	sci_req = to_sci_req(stp_req);
	ireq = sci_req_to_ireq(sci_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			/* Copy at most one sg entry's worth per iteration.
			 * NOTE(review): only the entry's first page is
			 * kmapped while copy_len is bounded by sg_dma_len();
			 * assumes each sg entry fits the mapped page — TODO
			 * confirm for multi-page entries. */
			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		/* No scatterlist: task->scatter is the destination buffer. */
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
1915
1916/**
1917 *
1918 * @sci_req: The PIO DATA IN request that is to receive the data.
1919 * @data_buffer: The buffer to copy from.
1920 *
1921 * Copy the data buffer to the io request data region. enum sci_status
1922 */
1923static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1924 struct scic_sds_stp_request *sci_req,
1925 u8 *data_buffer)
1926{
1927 enum sci_status status;
1928
1929 /*
1930 * If there is less than 1K remaining in the transfer request
1931 * copy just the data for the transfer */
1932 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
1933 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1934 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
1935
1936 if (status == SCI_SUCCESS)
1937 sci_req->type.pio.pio_transfer_bytes = 0;
1938 } else {
1939 /* We are transfering the whole frame so copy */
1940 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1941 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1942
1943 if (status == SCI_SUCCESS)
1944 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
1945 }
1946
1947 return status;
1948}
1949
1950/**
1951 *
1952 * @sci_req:
1953 * @completion_code:
1954 *
1955 * enum sci_status
1956 */
1957static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
1958 struct scic_sds_request *sci_req,
1959 u32 completion_code)
1960{
1961 enum sci_status status = SCI_SUCCESS;
1962
1963 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1964 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1965 scic_sds_request_set_status(
1966 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1967 );
1968
1969 sci_base_state_machine_change_state(
1434 &sci_req->state_machine, 1970 &sci_req->state_machine,
1435 SCI_BASE_REQUEST_STATE_COMPLETED); 1971 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1972 );
1973 break;
1974
1975 default:
1976 /*
1977 * All other completion status cause the IO to be complete. If a NAK
1978 * was received, then it is up to the user to retry the request. */
1979 scic_sds_request_set_status(
1980 sci_req,
1981 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1982 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1983 );
1984
1985 sci_base_state_machine_change_state(
1986 &sci_req->state_machine,
1987 SCI_BASE_REQUEST_STATE_COMPLETED
1988 );
1989 break;
1990 }
1991
1992 return status;
1993}
1994
1995static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
1996 u32 frame_index)
1997{
1998 struct scic_sds_controller *scic = sci_req->owning_controller;
1999 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2000 struct isci_request *ireq = sci_req_to_ireq(sci_req);
2001 struct sas_task *task = isci_request_access_task(ireq);
2002 struct dev_to_host_fis *frame_header;
2003 enum sci_status status;
2004 u32 *frame_buffer;
2005
2006 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2007 frame_index,
2008 (void **)&frame_header);
2009
2010 if (status != SCI_SUCCESS) {
2011 dev_err(scic_to_dev(scic),
2012 "%s: SCIC IO Request 0x%p could not get frame header "
2013 "for frame index %d, status %x\n",
2014 __func__, stp_req, frame_index, status);
2015 return status;
2016 }
2017
2018 switch (frame_header->fis_type) {
2019 case FIS_PIO_SETUP:
2020 /* Get from the frame buffer the PIO Setup Data */
2021 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2022 frame_index,
2023 (void **)&frame_buffer);
2024
2025 /* Get the data from the PIO Setup The SCU Hardware returns
2026 * first word in the frame_header and the rest of the data is in
2027 * the frame buffer so we need to back up one dword
2028 */
2029
2030 /* transfer_count: first 16bits in the 4th dword */
2031 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
2032
2033 /* ending_status: 4th byte in the 3rd dword */
2034 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
2035
2036 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2037 frame_header,
2038 frame_buffer);
2039
2040 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
2041
2042 /* The next state is dependent on whether the
2043 * request was PIO Data-in or Data out
2044 */
2045 if (task->data_dir == DMA_FROM_DEVICE) {
2046 sci_base_state_machine_change_state(&sci_req->state_machine,
2047 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
2048 } else if (task->data_dir == DMA_TO_DEVICE) {
2049 /* Transmit data */
2050 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
2051 if (status != SCI_SUCCESS)
2052 break;
2053 sci_base_state_machine_change_state(&sci_req->state_machine,
2054 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
2055 }
2056 break;
2057 case FIS_SETDEVBITS:
2058 sci_base_state_machine_change_state(&sci_req->state_machine,
2059 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
2060 break;
2061 case FIS_REGD2H:
2062 if (frame_header->status & ATA_BUSY) {
2063 /* Now why is the drive sending a D2H Register FIS when
2064 * it is still busy? Do nothing since we are still in
2065 * the right state.
2066 */
2067 dev_dbg(scic_to_dev(scic),
2068 "%s: SCIC PIO Request 0x%p received "
2069 "D2H Register FIS with BSY status "
2070 "0x%x\n", __func__, stp_req,
2071 frame_header->status);
2072 break;
2073 }
2074
2075 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2076 frame_index,
2077 (void **)&frame_buffer);
2078
2079 scic_sds_controller_copy_sata_response(&sci_req->stp.req,
2080 frame_header,
2081 frame_buffer);
2082
2083 scic_sds_request_set_status(sci_req,
2084 SCU_TASK_DONE_CHECK_RESPONSE,
2085 SCI_FAILURE_IO_RESPONSE_VALID);
2086
2087 sci_base_state_machine_change_state(&sci_req->state_machine,
2088 SCI_BASE_REQUEST_STATE_COMPLETED);
2089 break;
2090 default:
2091 /* FIXME: what do we do here? */
2092 break;
2093 }
2094
2095 /* Frame is decoded return it to the controller */
2096 scic_sds_controller_release_frame(scic, frame_index);
2097
2098 return status;
2099}
2100
/**
 * scic_sds_stp_request_pio_data_in_await_data_frame_handler() - handle a
 *    frame received while a PIO data-in request waits for DATA FISes.
 * @sci_req: the request for which a frame has been received.
 * @frame_index: index of the received unsolicited frame.
 *
 * For a FIS_DATA frame the payload is copied through the request's SGL; when
 * the SGL cursor is unset the frame index is saved for later processing and
 * the frame is deliberately NOT released here.  When the transfer count
 * reaches zero the request either completes (ending status no longer BSY) or
 * returns to the await-frame state.  Non-DATA frames complete the request
 * with SCI_FAILURE_IO_REQUIRES_SCSI_ABORT.
 *
 * Return: status of the header lookup or of the data copy.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
										 u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	struct sata_fis_data *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	if (frame_header->fis_type == FIS_DATA) {
		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
			/* No SGL to copy into yet: stash the frame index and
			 * keep the frame (no release) for later consumption. */
			sci_req->saved_rx_frame_index = frame_index;
			stp_req->type.pio.pio_transfer_bytes = 0;
		} else {
			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&frame_buffer);

			status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
									    (u8 *)frame_buffer);

			/* Frame is decoded return it to the controller */
			scic_sds_controller_release_frame(scic, frame_index);
		}

		/* Check for the end of the transfer, are there more
		 * bytes remaining for this data transfer
		 */
		if (status != SCI_SUCCESS ||
		    stp_req->type.pio.pio_transfer_bytes != 0)
			return status;

		if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
			/* Device is done: complete with response-valid. */
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);

			sci_base_state_machine_change_state(&sci_req->state_machine,
							    SCI_BASE_REQUEST_STATE_COMPLETED);
		} else {
			/* Still busy: go back to waiting for the next frame. */
			sci_base_state_machine_change_state(&sci_req->state_machine,
							    SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
		}
	} else {
		dev_err(scic_to_dev(scic),
			"%s: SCIC PIO Request 0x%p received frame %d "
			"with fis type 0x%02x when expecting a data "
			"fis.\n", __func__, stp_req, frame_index,
			frame_header->fis_type);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);

		/* Frame is decoded return it to the controller */
		scic_sds_controller_release_frame(scic, frame_index);
	}

	return status;
}
2176
2177
2178/**
2179 *
2180 * @sci_req:
2181 * @completion_code:
2182 *
2183 * enum sci_status
2184 */
2185static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
2186
2187 struct scic_sds_request *sci_req,
2188 u32 completion_code)
2189{
2190 enum sci_status status = SCI_SUCCESS;
2191 bool all_frames_transferred = false;
2192 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2193
2194 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2195 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2196 /* Transmit data */
2197 if (stp_req->type.pio.pio_transfer_bytes != 0) {
2198 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
2199 if (status == SCI_SUCCESS) {
2200 if (stp_req->type.pio.pio_transfer_bytes == 0)
2201 all_frames_transferred = true;
2202 }
2203 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
2204 /*
2205 * this will happen if the all data is written at the
2206 * first time after the pio setup fis is received
2207 */
2208 all_frames_transferred = true;
2209 }
2210
2211 /* all data transferred. */
2212 if (all_frames_transferred) {
2213 /*
2214 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
2215 * and wait for PIO_SETUP fis / or D2H REg fis. */
2216 sci_base_state_machine_change_state(
2217 &sci_req->state_machine,
2218 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
2219 );
2220 }
2221 break;
2222
2223 default:
2224 /*
2225 * All other completion status cause the IO to be complete. If a NAK
2226 * was received, then it is up to the user to retry the request. */
2227 scic_sds_request_set_status(
2228 sci_req,
2229 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2230 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2231 );
2232
2233 sci_base_state_machine_change_state(
2234 &sci_req->state_machine,
2235 SCI_BASE_REQUEST_STATE_COMPLETED
2236 );
2237 break;
2238 }
2239
2240 return status;
2241}
2242
2243/**
2244 *
2245 * @request: This is the request which is receiving the event.
2246 * @event_code: This is the event code that the request on which the request is
2247 * expected to take action.
2248 *
2249 * This method will handle any link layer events while waiting for the data
2250 * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
2251 */
2252static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
2253 struct scic_sds_request *request,
2254 u32 event_code)
2255{
2256 enum sci_status status;
2257
2258 switch (scu_get_event_specifier(event_code)) {
2259 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
2260 /*
2261 * We are waiting for data and the SCU has R_ERR the data frame.
2262 * Go back to waiting for the D2H Register FIS */
2263 sci_base_state_machine_change_state(
2264 &request->state_machine,
2265 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
2266 );
2267
2268 status = SCI_SUCCESS;
2269 break;
2270
2271 default:
2272 dev_err(scic_to_dev(request->owning_controller),
2273 "%s: SCIC PIO Request 0x%p received unexpected "
2274 "event 0x%08x\n",
2275 __func__, request, event_code);
2276
2277 /* / @todo Should we fail the PIO request when we get an unexpected event? */
2278 status = SCI_FAILURE;
2279 break;
2280 }
2281
2282 return status;
2283}
2284
2285static void scic_sds_stp_request_udma_complete_request(
2286 struct scic_sds_request *request,
2287 u32 scu_status,
2288 enum sci_status sci_status)
2289{
2290 scic_sds_request_set_status(request, scu_status, sci_status);
2291 sci_base_state_machine_change_state(&request->state_machine,
2292 SCI_BASE_REQUEST_STATE_COMPLETED);
2293}
2294
2295static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
2296 u32 frame_index)
2297{
2298 struct scic_sds_controller *scic = sci_req->owning_controller;
2299 struct dev_to_host_fis *frame_header;
2300 enum sci_status status;
2301 u32 *frame_buffer;
2302
2303 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2304 frame_index,
2305 (void **)&frame_header);
2306
2307 if ((status == SCI_SUCCESS) &&
2308 (frame_header->fis_type == FIS_REGD2H)) {
2309 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2310 frame_index,
2311 (void **)&frame_buffer);
2312
2313 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2314 frame_header,
2315 frame_buffer);
2316 }
2317
2318 scic_sds_controller_release_frame(scic, frame_index);
2319
2320 return status;
2321}
2322
/**
 * scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler() -
 *    process the TC completion of a UDMA request.
 * @sci_req: the UDMA request receiving the completion.
 * @completion_code: raw SCU task completion code.
 *
 * GOOD completes the request with SCI_SUCCESS.  UNEXP_FIS/REG_ERR complete
 * immediately when a D2H register FIS already landed in the response buffer,
 * otherwise the request moves to waiting for that FIS.  Link-error codes
 * suspend the remote device and then fall through to the default error
 * completion.
 *
 * Return: SCI_SUCCESS (the local status is never modified).
 */
static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_TASK_DONE_GOOD,
							   SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/*
		 * We must check the response buffer to see if the D2H Register FIS was
		 * received before we got the TC completion. */
		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
			scic_sds_remote_device_suspend(sci_req->target_device,
				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));

			scic_sds_stp_request_udma_complete_request(sci_req,
								   SCU_TASK_DONE_CHECK_RESPONSE,
								   SCI_FAILURE_IO_RESPONSE_VALID);
		} else {
			/*
			 * If we have an error completion status for the TC then we can expect a
			 * D2H register FIS from the device so we must change state to wait for it */
			sci_base_state_machine_change_state(&sci_req->state_machine,
				SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
		}
		break;

	/*
	 * / @todo Check to see if any of these completion status need to wait for
	 * / the device to host register fis. */
	/* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
		scic_sds_remote_device_suspend(sci_req->target_device,
			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
	/* Fall through to the default case */
	default:
		/* All other completion status cause the IO to be complete. */
		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
							   SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	return status;
}
2378
2379static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
2380 struct scic_sds_request *sci_req,
2381 u32 frame_index)
2382{
2383 enum sci_status status;
2384
2385 /* Use the general frame handler to copy the resposne data */
2386 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
2387
2388 if (status != SCI_SUCCESS)
2389 return status;
2390
2391 scic_sds_stp_request_udma_complete_request(sci_req,
2392 SCU_TASK_DONE_CHECK_RESPONSE,
2393 SCI_FAILURE_IO_RESPONSE_VALID);
2394
2395 return status;
2396}
2397
/* UDMA construction stub: no per-request setup is required here, so the
 * parameters are unused and the call always reports SCI_SUCCESS. */
enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
						    u32 len,
						    enum dma_data_direction dir)
{
	return SCI_SUCCESS;
}
2404
/**
 *
 * @sci_req:
 * @completion_code:
 *
 * This method processes the TC completion for the transmission of the H2D
 * register FIS that asserts SRST for a SATA soft reset.  On success the
 * request advances to transmitting the de-asserting (diagnostic) H2D FIS;
 * any other completion finishes the request with a controller-specific
 * error.  SCI_SUCCESS This value is always returned.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
2448
/**
 *
 * @sci_req:
 * @completion_code:
 *
 * This method processes the TC completion for the transmission of the H2D
 * register FIS that de-asserts SRST (the diagnostic phase of a SATA soft
 * reset).  On success the request moves on to wait for the D2H response
 * frame; any other completion finishes the request with a
 * controller-specific error.  SCI_SUCCESS This value is always returned.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE);
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1441 2489
2490/**
2491 *
2492 * @request: This parameter specifies the request for which a frame has been
2493 * received.
2494 * @frame_index: This parameter specifies the index of the frame that has been
2495 * received.
2496 *
2497 * This method processes frames received from the target while waiting for a
2498 * device to host register FIS. If a non-register FIS is received during this
2499 * time, it is treated as a protocol violation from an IO perspective. Indicate
2500 * if the received frame was processed successfully.
2501 */
2502static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
2503 struct scic_sds_request *sci_req,
2504 u32 frame_index)
2505{
2506 enum sci_status status;
2507 struct dev_to_host_fis *frame_header;
2508 u32 *frame_buffer;
2509 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2510 struct scic_sds_controller *scic = sci_req->owning_controller;
2511
2512 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2513 frame_index,
2514 (void **)&frame_header);
2515 if (status != SCI_SUCCESS) {
2516 dev_err(scic_to_dev(scic),
2517 "%s: SCIC IO Request 0x%p could not get frame header "
2518 "for frame index %d, status %x\n",
2519 __func__, stp_req, frame_index, status);
2520 return status;
2521 }
2522
2523 switch (frame_header->fis_type) {
2524 case FIS_REGD2H:
2525 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2526 frame_index,
2527 (void **)&frame_buffer);
2528
2529 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2530 frame_header,
2531 frame_buffer);
2532
2533 /* The command has completed with error */
2534 scic_sds_request_set_status(sci_req,
2535 SCU_TASK_DONE_CHECK_RESPONSE,
2536 SCI_FAILURE_IO_RESPONSE_VALID);
2537 break;
2538
2539 default:
2540 dev_warn(scic_to_dev(scic),
2541 "%s: IO Request:0x%p Frame Id:%d protocol "
2542 "violation occurred\n", __func__, stp_req,
2543 frame_index);
2544
2545 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
2546 SCI_FAILURE_PROTOCOL_VIOLATION);
2547 break;
2548 }
2549
2550 sci_base_state_machine_change_state(&sci_req->state_machine,
2551 SCI_BASE_REQUEST_STATE_COMPLETED);
2552
2553 /* Frame has been decoded return it to the controller */
2554 scic_sds_controller_release_frame(scic, frame_index);
2555
2556 return status;
2557}
2558
1442static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { 2559static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
1443 [SCI_BASE_REQUEST_STATE_INITIAL] = { }, 2560 [SCI_BASE_REQUEST_STATE_INITIAL] = { },
1444 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { 2561 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
@@ -1467,6 +2584,52 @@ static const struct scic_sds_io_request_state_handler scic_sds_request_state_han
1467 .abort_handler = scic_sds_request_started_state_abort_handler, 2584 .abort_handler = scic_sds_request_started_state_abort_handler,
1468 .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler, 2585 .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler,
1469 }, 2586 },
2587 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
2588 .abort_handler = scic_sds_request_started_state_abort_handler,
2589 .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
2590 .frame_handler = scic_sds_stp_request_udma_general_frame_handler,
2591 },
2592 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
2593 .abort_handler = scic_sds_request_started_state_abort_handler,
2594 .frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
2595 },
2596 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
2597 .abort_handler = scic_sds_request_started_state_abort_handler,
2598 .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
2599 },
2600 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
2601 .abort_handler = scic_sds_request_started_state_abort_handler,
2602 .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler,
2603 },
2604 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
2605 .abort_handler = scic_sds_request_started_state_abort_handler,
2606 .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
2607 },
2608 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
2609 .abort_handler = scic_sds_request_started_state_abort_handler,
2610 .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
2611 },
2612 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
2613 .abort_handler = scic_sds_request_started_state_abort_handler,
2614 .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
2615 .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
2616 },
2617 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
2618 .abort_handler = scic_sds_request_started_state_abort_handler,
2619 .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
2620 },
2621 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
2622 .abort_handler = scic_sds_request_started_state_abort_handler,
2623 .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
2624 },
2625 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
2626 .abort_handler = scic_sds_request_started_state_abort_handler,
2627 .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
2628 },
2629 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
2630 .abort_handler = scic_sds_request_started_state_abort_handler,
2631 .frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
2632 },
1470 [SCI_BASE_REQUEST_STATE_COMPLETED] = { 2633 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
1471 .complete_handler = scic_sds_request_completed_state_complete_handler, 2634 .complete_handler = scic_sds_request_completed_state_complete_handler,
1472 }, 2635 },
@@ -2210,15 +3373,6 @@ static void scic_sds_request_constructed_state_enter(void *object)
2210 ); 3373 );
2211} 3374}
2212 3375
2213/**
2214 * scic_sds_request_started_state_enter() -
2215 * @object: This parameter specifies the base object for which the state
2216 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
2217 *
2218 * This method implements the actions taken when entering the
2219 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a
2220 * SCSI Task request we must enter the started substate machine. none
2221 */
2222static void scic_sds_request_started_state_enter(void *object) 3376static void scic_sds_request_started_state_enter(void *object)
2223{ 3377{
2224 struct scic_sds_request *sci_req = object; 3378 struct scic_sds_request *sci_req = object;
@@ -2238,40 +3392,36 @@ static void scic_sds_request_started_state_enter(void *object)
2238 SCI_BASE_REQUEST_STATE_STARTED 3392 SCI_BASE_REQUEST_STATE_STARTED
2239 ); 3393 );
2240 3394
2241 /* Most of the request state machines have a started substate machine so 3395 /* all unaccelerated request types (non ssp or ncq) handled with
2242 * start its execution on the entry to the started state. 3396 * substates
2243 */ 3397 */
2244 if (sci_req->has_started_substate_machine == true)
2245 sci_base_state_machine_start(&sci_req->started_substate_machine);
2246
2247 if (!task && dev->dev_type == SAS_END_DEV) { 3398 if (!task && dev->dev_type == SAS_END_DEV) {
2248 sci_base_state_machine_change_state(sm, 3399 sci_base_state_machine_change_state(sm,
2249 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION); 3400 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
3401 } else if (!task &&
3402 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
3403 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
3404 sci_base_state_machine_change_state(sm,
3405 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
2250 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 3406 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2251 sci_base_state_machine_change_state(sm, 3407 sci_base_state_machine_change_state(sm,
2252 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE); 3408 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE);
3409 } else if (task && sas_protocol_ata(task->task_proto) &&
3410 !task->ata_task.use_ncq) {
3411 u32 state;
3412
3413 if (task->data_dir == DMA_NONE)
3414 state = SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE;
3415 else if (task->ata_task.dma_xfer)
3416 state = SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE;
3417 else /* PIO */
3418 state = SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE;
3419
3420 sci_base_state_machine_change_state(sm, state);
2253 } 3421 }
2254} 3422}
2255 3423
2256/** 3424/**
2257 * scic_sds_request_started_state_exit() -
2258 * @object: This parameter specifies the base object for which the state
2259 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
2260 * object.
2261 *
2262 * This method implements the actions taken when exiting the
2263 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be
2264 * to stop the started substate machine. none
2265 */
2266static void scic_sds_request_started_state_exit(void *object)
2267{
2268 struct scic_sds_request *sci_req = object;
2269
2270 if (sci_req->has_started_substate_machine == true)
2271 sci_base_state_machine_stop(&sci_req->started_substate_machine);
2272}
2273
2274/**
2275 * scic_sds_request_completed_state_enter() - 3425 * scic_sds_request_completed_state_enter() -
2276 * @object: This parameter specifies the base object for which the state 3426 * @object: This parameter specifies the base object for which the state
2277 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST 3427 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
@@ -2392,6 +3542,175 @@ static void scic_sds_smp_request_started_await_tc_completion_substate_enter(void
2392 ); 3542 );
2393} 3543}
2394 3544
3545static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
3546 void *object)
3547{
3548 struct scic_sds_request *sci_req = object;
3549
3550 SET_STATE_HANDLER(
3551 sci_req,
3552 scic_sds_request_state_handler_table,
3553 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
3554 );
3555
3556 scic_sds_remote_device_set_working_request(
3557 sci_req->target_device, sci_req
3558 );
3559}
3560
3561static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
3562{
3563 struct scic_sds_request *sci_req = object;
3564
3565 SET_STATE_HANDLER(
3566 sci_req,
3567 scic_sds_request_state_handler_table,
3568 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
3569 );
3570}
3571
3572
3573
3574static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
3575 void *object)
3576{
3577 struct scic_sds_request *sci_req = object;
3578
3579 SET_STATE_HANDLER(
3580 sci_req,
3581 scic_sds_request_state_handler_table,
3582 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
3583 );
3584
3585 scic_sds_remote_device_set_working_request(
3586 sci_req->target_device, sci_req);
3587}
3588
3589static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
3590{
3591 struct scic_sds_request *sci_req = object;
3592
3593 SET_STATE_HANDLER(
3594 sci_req,
3595 scic_sds_request_state_handler_table,
3596 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
3597 );
3598}
3599
3600static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
3601 void *object)
3602{
3603 struct scic_sds_request *sci_req = object;
3604
3605 SET_STATE_HANDLER(
3606 sci_req,
3607 scic_sds_request_state_handler_table,
3608 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
3609 );
3610}
3611
3612static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
3613 void *object)
3614{
3615 struct scic_sds_request *sci_req = object;
3616
3617 SET_STATE_HANDLER(
3618 sci_req,
3619 scic_sds_request_state_handler_table,
3620 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
3621 );
3622}
3623
3624
3625
3626static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
3627 void *object)
3628{
3629 struct scic_sds_request *sci_req = object;
3630
3631 SET_STATE_HANDLER(
3632 sci_req,
3633 scic_sds_request_state_handler_table,
3634 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
3635 );
3636}
3637
3638/**
3639 *
3640 *
3641 * This state is entered when there is an TC completion failure. The hardware
3642 * received an unexpected condition while processing the IO request and now
3643 * will UF the D2H register FIS to complete the IO.
3644 */
3645static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
3646 void *object)
3647{
3648 struct scic_sds_request *sci_req = object;
3649
3650 SET_STATE_HANDLER(
3651 sci_req,
3652 scic_sds_request_state_handler_table,
3653 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
3654 );
3655}
3656
3657
3658
3659static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
3660 void *object)
3661{
3662 struct scic_sds_request *sci_req = object;
3663
3664 SET_STATE_HANDLER(
3665 sci_req,
3666 scic_sds_request_state_handler_table,
3667 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
3668 );
3669
3670 scic_sds_remote_device_set_working_request(
3671 sci_req->target_device, sci_req
3672 );
3673}
3674
3675static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
3676 void *object)
3677{
3678 struct scic_sds_request *sci_req = object;
3679 struct scu_task_context *task_context;
3680 struct host_to_dev_fis *h2d_fis;
3681 enum sci_status status;
3682
3683 /* Clear the SRST bit */
3684 h2d_fis = &sci_req->stp.cmd;
3685 h2d_fis->control = 0;
3686
3687 /* Clear the TC control bit */
3688 task_context = scic_sds_controller_get_task_context_buffer(
3689 sci_req->owning_controller, sci_req->io_tag);
3690 task_context->control_frame = 0;
3691
3692 status = scic_controller_continue_io(sci_req);
3693 if (status == SCI_SUCCESS) {
3694 SET_STATE_HANDLER(
3695 sci_req,
3696 scic_sds_request_state_handler_table,
3697 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
3698 );
3699 }
3700}
3701
3702static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
3703 void *object)
3704{
3705 struct scic_sds_request *sci_req = object;
3706
3707 SET_STATE_HANDLER(
3708 sci_req,
3709 scic_sds_request_state_handler_table,
3710 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
3711 );
3712}
3713
2395static const struct sci_base_state scic_sds_request_state_table[] = { 3714static const struct sci_base_state scic_sds_request_state_table[] = {
2396 [SCI_BASE_REQUEST_STATE_INITIAL] = { 3715 [SCI_BASE_REQUEST_STATE_INITIAL] = {
2397 .enter_state = scic_sds_request_initial_state_enter, 3716 .enter_state = scic_sds_request_initial_state_enter,
@@ -2401,7 +3720,39 @@ static const struct sci_base_state scic_sds_request_state_table[] = {
2401 }, 3720 },
2402 [SCI_BASE_REQUEST_STATE_STARTED] = { 3721 [SCI_BASE_REQUEST_STATE_STARTED] = {
2403 .enter_state = scic_sds_request_started_state_enter, 3722 .enter_state = scic_sds_request_started_state_enter,
2404 .exit_state = scic_sds_request_started_state_exit 3723 },
3724 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
3725 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
3726 },
3727 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
3728 .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
3729 },
3730 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
3731 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
3732 },
3733 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
3734 .enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
3735 },
3736 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
3737 .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
3738 },
3739 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
3740 .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
3741 },
3742 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
3743 .enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
3744 },
3745 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
3746 .enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
3747 },
3748 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
3749 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3750 },
3751 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
3752 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3753 },
3754 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
3755 .enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
2405 }, 3756 },
2406 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { 3757 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
2407 .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter, 3758 .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter,
@@ -2437,7 +3788,6 @@ static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
2437 sci_req->io_tag = io_tag; 3788 sci_req->io_tag = io_tag;
2438 sci_req->owning_controller = scic; 3789 sci_req->owning_controller = scic;
2439 sci_req->target_device = sci_dev; 3790 sci_req->target_device = sci_dev;
2440 sci_req->has_started_substate_machine = false;
2441 sci_req->protocol = SCIC_NO_PROTOCOL; 3791 sci_req->protocol = SCIC_NO_PROTOCOL;
2442 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 3792 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
2443 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev); 3793 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
@@ -3065,6 +4415,3 @@ int isci_request_execute(
3065 *isci_request = request; 4415 *isci_request = request;
3066 return ret; 4416 return ret;
3067} 4417}
3068
3069
3070
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index d090cb1a14d6..95b65891fc41 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -59,7 +59,6 @@
59#include "isci.h" 59#include "isci.h"
60#include "host.h" 60#include "host.h"
61#include "scu_task_context.h" 61#include "scu_task_context.h"
62#include "stp_request.h"
63 62
64/** 63/**
65 * struct isci_request_status - This enum defines the possible states of an I/O 64 * struct isci_request_status - This enum defines the possible states of an I/O
@@ -90,6 +89,63 @@ enum sci_request_protocol {
90 SCIC_STP_PROTOCOL 89 SCIC_STP_PROTOCOL
91}; /* XXX remove me, use sas_task.{dev|task_proto} instead */; 90}; /* XXX remove me, use sas_task.{dev|task_proto} instead */;
92 91
92struct scic_sds_stp_request {
93 union {
94 u32 ncq;
95
96 u32 udma;
97
98 struct scic_sds_stp_pio_request {
99 /**
100 * Total transfer for the entire PIO request recorded at request construction
101 * time.
102 *
103 * @todo Should we just decrement this value for each byte of data transmitted
104 * or received to eliminate the current_transfer_bytes field?
105 */
106 u32 total_transfer_bytes;
107
108 /**
109 * Total number of bytes received/transmitted in data frames since the start
110 * of the IO request. At the end of the IO request this should equal the
111 * total_transfer_bytes.
112 */
113 u32 current_transfer_bytes;
114
115 /**
116 * The number of bytes requested in the PIO setup.
117 */
118 u32 pio_transfer_bytes;
119
120 /**
121 * PIO Setup ending status value to tell us if we need to wait for another FIS
122 * or if the transfer is complete. On the receipt of a D2H FIS this will be
123 * the status field of that FIS.
124 */
125 u8 ending_status;
126
127 /**
128 * On receipt of a D2H FIS this will be the ending error field if the
129 * ending_status has the SATA_STATUS_ERR bit set.
130 */
131 u8 ending_error;
132
133 struct scic_sds_request_pio_sgl {
134 struct scu_sgl_element_pair *sgl_pair;
135 u8 sgl_set;
136 u32 sgl_offset;
137 } request_current;
138 } pio;
139
140 struct {
141 /**
142 * The number of bytes requested in the PIO setup before CDB data frame.
143 */
144 u32 device_preferred_cdb_length;
145 } packet;
146 } type;
147};
148
93struct scic_sds_request { 149struct scic_sds_request {
94 /** 150 /**
95 * This field contains the information for the base request state machine. 151 * This field contains the information for the base request state machine.
@@ -159,12 +215,6 @@ struct scic_sds_request {
159 bool is_task_management_request; 215 bool is_task_management_request;
160 216
161 /** 217 /**
162 * This field indicates that this request contains an initialized started
163 * substate machine.
164 */
165 bool has_started_substate_machine;
166
167 /**
168 * This field is a pointer to the stored rx frame data. It is used in STP 218 * This field is a pointer to the stored rx frame data. It is used in STP
169 * internal requests and SMP response frames. If this field is non-NULL the 219 * internal requests and SMP response frames. If this field is non-NULL the
170 * saved frame must be released on IO request completion. 220 * saved frame must be released on IO request completion.
@@ -174,12 +224,6 @@ struct scic_sds_request {
174 u32 saved_rx_frame_index; 224 u32 saved_rx_frame_index;
175 225
176 /** 226 /**
177 * This field specifies the data necessary to manage the sub-state
178 * machine executed while in the SCI_BASE_REQUEST_STATE_STARTED state.
179 */
180 struct sci_base_state_machine started_substate_machine;
181
182 /**
183 * This field specifies the current state handlers in place for this 227 * This field specifies the current state handlers in place for this
184 * IO Request object. This field is updated each time the request 228 * IO Request object. This field is updated each time the request
185 * changes state. 229 * changes state.
@@ -295,6 +339,41 @@ enum sci_base_request_states {
295 */ 339 */
296 SCI_BASE_REQUEST_STATE_STARTED, 340 SCI_BASE_REQUEST_STATE_STARTED,
297 341
342 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE,
343 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE,
344
345 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE,
346 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE,
347
348 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE,
349 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE,
350 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE,
351
352 /**
353 * While in this state the IO request object is waiting for the TC completion
354 * notification for the H2D Register FIS
355 */
356 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE,
357
358 /**
359 * While in this state the IO request object is waiting for either a PIO Setup
360 * FIS or a D2H register FIS. The type of frame received is based on the
361 * result of the prior frame and line conditions.
362 */
363 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE,
364
365 /**
366 * While in this state the IO request object is waiting for a DATA frame from
367 * the device.
368 */
369 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE,
370
371 /**
372 * While in this state the IO request object is waiting to transmit the next data
373 * frame to the device.
374 */
375 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE,
376
298 /** 377 /**
299 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw 378 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
300 * task management request is waiting for the transmission of the 379 * task management request is waiting for the transmission of the
@@ -383,8 +462,6 @@ struct scic_sds_io_request_state_handler {
383 462
384}; 463};
385 464
386extern const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[];
387
388/** 465/**
389 * scic_sds_request_get_controller() - 466 * scic_sds_request_get_controller() -
390 * 467 *
@@ -473,7 +550,6 @@ scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completi
473 (scu_sge).address_modifier = 0; \ 550 (scu_sge).address_modifier = 0; \
474 } 551 }
475 552
476void scic_sds_request_build_sgl(struct scic_sds_request *sci_req);
477enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); 553enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
478enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); 554enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
479enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, 555enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
@@ -481,8 +557,6 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r
481enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, 557enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
482 u32 frame_index); 558 u32 frame_index);
483enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req); 559enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
484enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req);
485
486 560
487/* XXX open code in caller */ 561/* XXX open code in caller */
488static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, 562static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
@@ -778,6 +852,9 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
778 struct scic_sds_request *sci_req); 852 struct scic_sds_request *sci_req);
779enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req); 853enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
780enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req); 854enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req);
855enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
856 u32 transfer_length,
857 enum dma_data_direction dir);
781void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); 858void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
782void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); 859void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
783#endif /* !defined(_ISCI_REQUEST_H_) */ 860#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/stp_request.c b/drivers/scsi/isci/stp_request.c
deleted file mode 100644
index e94ece81ed9d..000000000000
--- a/drivers/scsi/isci/stp_request.c
+++ /dev/null
@@ -1,1584 +0,0 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include <scsi/sas.h>
57#include "sas.h"
58#include "state_machine.h"
59#include "remote_device.h"
60#include "stp_request.h"
61#include "unsolicited_frame_control.h"
62#include "scu_completion_codes.h"
63#include "scu_event_codes.h"
64#include "scu_task_context.h"
65#include "request.h"
66
67/**
68 * This method will fill in the SCU Task Context for any type of SATA
69 * request. This is called from the various SATA constructors.
70 * @sci_req: The general IO request object which is to be used in
71 * constructing the SCU task context.
72 * @task_context: The buffer pointer for the SCU task context which is being
73 * constructed.
74 *
75 * The general io request construction is complete. The buffer assignment for
76 * the command buffer is complete. Revisit task context construction to
77 * determine what is common for SSP/SMP/STP task context structures.
78 */
79static void scu_sata_reqeust_construct_task_context(
80 struct scic_sds_request *sci_req,
81 struct scu_task_context *task_context)
82{
83 dma_addr_t dma_addr;
84 struct scic_sds_controller *controller;
85 struct scic_sds_remote_device *target_device;
86 struct scic_sds_port *target_port;
87
88 controller = scic_sds_request_get_controller(sci_req);
89 target_device = scic_sds_request_get_device(sci_req);
90 target_port = scic_sds_request_get_port(sci_req);
91
92 /* Fill in the TC with the its required data */
93 task_context->abort = 0;
94 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
95 task_context->initiator_request = 1;
96 task_context->connection_rate = target_device->connection_rate;
97 task_context->protocol_engine_index =
98 scic_sds_controller_get_protocol_engine_group(controller);
99 task_context->logical_port_index =
100 scic_sds_port_get_index(target_port);
101 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
102 task_context->valid = SCU_TASK_CONTEXT_VALID;
103 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
104
105 task_context->remote_node_index =
106 scic_sds_remote_device_get_index(sci_req->target_device);
107 task_context->command_code = 0;
108
109 task_context->link_layer_control = 0;
110 task_context->do_not_dma_ssp_good_response = 1;
111 task_context->strict_ordering = 0;
112 task_context->control_frame = 0;
113 task_context->timeout_enable = 0;
114 task_context->block_guard_enable = 0;
115
116 task_context->address_modifier = 0;
117 task_context->task_phase = 0x01;
118
119 task_context->ssp_command_iu_length =
120 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
121
122 /* Set the first word of the H2D REG FIS */
123 task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
124
125 if (sci_req->was_tag_assigned_by_user) {
126 /*
127 * Build the task context now since we have already read
128 * the data
129 */
130 sci_req->post_context =
131 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
132 (scic_sds_controller_get_protocol_engine_group(
133 controller) <<
134 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
135 (scic_sds_port_get_index(target_port) <<
136 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
137 scic_sds_io_tag_get_index(sci_req->io_tag));
138 } else {
139 /*
140 * Build the task context now since we have already read
141 * the data.
142 * I/O tag index is not assigned because we have to wait
143 * until we get a TCi.
144 */
145 sci_req->post_context =
146 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
147 (scic_sds_controller_get_protocol_engine_group(
148 controller) <<
149 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
150 (scic_sds_port_get_index(target_port) <<
151 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
152 }
153
154 /*
155 * Copy the physical address for the command buffer to the SCU Task
156 * Context. We must offset the command buffer by 4 bytes because the
157 * first 4 bytes are transferred in the body of the TC.
158 */
159 dma_addr = scic_io_request_get_dma_addr(sci_req,
160 ((char *) &sci_req->stp.cmd) +
161 sizeof(u32));
162
163 task_context->command_iu_upper = upper_32_bits(dma_addr);
164 task_context->command_iu_lower = lower_32_bits(dma_addr);
165
166 /* SATA Requests do not have a response buffer */
167 task_context->response_iu_upper = 0;
168 task_context->response_iu_lower = 0;
169}
170
171/**
172 *
173 * @sci_req:
174 *
175 * This method will perform any general sata request construction. What part of
176 * SATA IO request construction is general? none
177 */
178static void scic_sds_stp_non_ncq_request_construct(
179 struct scic_sds_request *sci_req)
180{
181 sci_req->has_started_substate_machine = true;
182}
183
184/**
185 *
186 * @sci_req: This parameter specifies the request to be constructed as an
187 * optimized request.
188 * @optimized_task_type: This parameter specifies whether the request is to be
189 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
190 * value of 1 indicates NCQ.
191 *
192 * This method will perform request construction common to all types of STP
193 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
194 * returns an indication as to whether the construction was successful.
195 */
196static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
197 u8 optimized_task_type,
198 u32 len,
199 enum dma_data_direction dir)
200{
201 struct scu_task_context *task_context = sci_req->task_context_buffer;
202
203 /* Build the STP task context structure */
204 scu_sata_reqeust_construct_task_context(sci_req, task_context);
205
206 /* Copy over the SGL elements */
207 scic_sds_request_build_sgl(sci_req);
208
209 /* Copy over the number of bytes to be transfered */
210 task_context->transfer_length_bytes = len;
211
212 if (dir == DMA_TO_DEVICE) {
213 /*
214 * The difference between the DMA IN and DMA OUT request task type
215 * values are consistent with the difference between FPDMA READ
216 * and FPDMA WRITE values. Add the supplied task type parameter
217 * to this difference to set the task type properly for this
218 * DATA OUT (WRITE) case. */
219 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
220 - SCU_TASK_TYPE_DMA_IN);
221 } else {
222 /*
223 * For the DATA IN (READ) case, simply save the supplied
224 * optimized task type. */
225 task_context->task_type = optimized_task_type;
226 }
227}
228
229/**
230 *
231 * @sci_req: This parameter specifies the request to be constructed.
232 *
233 * This method will construct the STP UDMA request and its associated TC data.
234 * This method returns an indication as to whether the construction was
235 * successful. SCI_SUCCESS Currently this method always returns this value.
236 */
237enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
238 u32 len,
239 enum dma_data_direction dir)
240{
241 scic_sds_stp_optimized_request_construct(sci_req,
242 SCU_TASK_TYPE_FPDMAQ_READ,
243 len, dir);
244 return SCI_SUCCESS;
245}
246
247/**
248 * scu_stp_raw_request_construct_task_context -
249 * @sci_req: This parameter specifies the STP request object for which to
250 * construct a RAW command frame task context.
251 * @task_context: This parameter specifies the SCU specific task context buffer
252 * to construct.
253 *
254 * This method performs the operations common to all SATA/STP requests
255 * utilizing the raw frame method. none
256 */
257static void scu_stp_raw_request_construct_task_context(
258 struct scic_sds_stp_request *stp_req,
259 struct scu_task_context *task_context)
260{
261 struct scic_sds_request *sci_req = to_sci_req(stp_req);
262
263 scu_sata_reqeust_construct_task_context(sci_req, task_context);
264
265 task_context->control_frame = 0;
266 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
267 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
268 task_context->type.stp.fis_type = FIS_REGH2D;
269 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
270}
271
272void scic_stp_io_request_set_ncq_tag(
273 struct scic_sds_request *req,
274 u16 ncq_tag)
275{
276 /**
277 * @note This could be made to return an error to the user if the user
278 * attempts to set the NCQ tag in the wrong state.
279 */
280 req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
281}
282
283/**
284 *
285 * @sci_req:
286 *
287 * Get the next SGL element from the request. - Check on which SGL element pair
288 * we are working - if working on SLG pair element A - advance to element B -
289 * else - check to see if there are more SGL element pairs for this IO request
290 * - if there are more SGL element pairs - advance to the next pair and return
291 * element A struct scu_sgl_element*
292 */
293static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
294{
295 struct scu_sgl_element *current_sgl;
296 struct scic_sds_request *sci_req = to_sci_req(stp_req);
297 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
298
299 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
300 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
301 pio_sgl->sgl_pair->B.address_upper == 0) {
302 current_sgl = NULL;
303 } else {
304 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
305 current_sgl = &pio_sgl->sgl_pair->B;
306 }
307 } else {
308 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
309 pio_sgl->sgl_pair->next_pair_upper == 0) {
310 current_sgl = NULL;
311 } else {
312 u64 phys_addr;
313
314 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
315 phys_addr <<= 32;
316 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
317
318 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
319 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
320 current_sgl = &pio_sgl->sgl_pair->A;
321 }
322 }
323
324 return current_sgl;
325}
326
327/**
328 *
329 * @sci_req:
330 * @completion_code:
331 *
332 * This method processes a TC completion. The expected TC completion is for
333 * the transmission of the H2D register FIS containing the SATA/STP non-data
334 * request. This method always successfully processes the TC completion.
335 * SCI_SUCCESS This value is always returned.
336 */
337static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
338 struct scic_sds_request *sci_req,
339 u32 completion_code)
340{
341 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
342 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
343 scic_sds_request_set_status(
344 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
345 );
346
347 sci_base_state_machine_change_state(
348 &sci_req->started_substate_machine,
349 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
350 );
351 break;
352
353 default:
354 /*
355 * All other completion status cause the IO to be complete. If a NAK
356 * was received, then it is up to the user to retry the request. */
357 scic_sds_request_set_status(
358 sci_req,
359 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
360 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
361 );
362
363 sci_base_state_machine_change_state(
364 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
365 break;
366 }
367
368 return SCI_SUCCESS;
369}
370
371/**
372 *
373 * @request: This parameter specifies the request for which a frame has been
374 * received.
375 * @frame_index: This parameter specifies the index of the frame that has been
376 * received.
377 *
378 * This method processes frames received from the target while waiting for a
379 * device to host register FIS. If a non-register FIS is received during this
380 * time, it is treated as a protocol violation from an IO perspective. Indicate
381 * if the received frame was processed successfully.
382 */
383static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
384 struct scic_sds_request *sci_req,
385 u32 frame_index)
386{
387 enum sci_status status;
388 struct dev_to_host_fis *frame_header;
389 u32 *frame_buffer;
390 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
391 struct scic_sds_controller *scic = sci_req->owning_controller;
392
393 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
394 frame_index,
395 (void **)&frame_header);
396
397 if (status != SCI_SUCCESS) {
398 dev_err(scic_to_dev(sci_req->owning_controller),
399 "%s: SCIC IO Request 0x%p could not get frame header "
400 "for frame index %d, status %x\n",
401 __func__, stp_req, frame_index, status);
402
403 return status;
404 }
405
406 switch (frame_header->fis_type) {
407 case FIS_REGD2H:
408 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
409 frame_index,
410 (void **)&frame_buffer);
411
412 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
413 frame_header,
414 frame_buffer);
415
416 /* The command has completed with error */
417 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
418 SCI_FAILURE_IO_RESPONSE_VALID);
419 break;
420
421 default:
422 dev_warn(scic_to_dev(scic),
423 "%s: IO Request:0x%p Frame Id:%d protocol "
424 "violation occurred\n", __func__, stp_req,
425 frame_index);
426
427 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
428 SCI_FAILURE_PROTOCOL_VIOLATION);
429 break;
430 }
431
432 sci_base_state_machine_change_state(&sci_req->state_machine,
433 SCI_BASE_REQUEST_STATE_COMPLETED);
434
435 /* Frame has been decoded return it to the controller */
436 scic_sds_controller_release_frame(scic, frame_index);
437
438 return status;
439}
440
441/* --------------------------------------------------------------------------- */
442
443static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = {
444 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
445 .abort_handler = scic_sds_request_started_state_abort_handler,
446 .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
447 },
448 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
449 .abort_handler = scic_sds_request_started_state_abort_handler,
450 .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler,
451 }
452};
453
454static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
455 void *object)
456{
457 struct scic_sds_request *sci_req = object;
458
459 SET_STATE_HANDLER(
460 sci_req,
461 scic_sds_stp_request_started_non_data_substate_handler_table,
462 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
463 );
464
465 scic_sds_remote_device_set_working_request(
466 sci_req->target_device, sci_req
467 );
468}
469
470static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
471{
472 struct scic_sds_request *sci_req = object;
473
474 SET_STATE_HANDLER(
475 sci_req,
476 scic_sds_stp_request_started_non_data_substate_handler_table,
477 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
478 );
479}
480
481/* --------------------------------------------------------------------------- */
482
483static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = {
484 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
485 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
486 },
487 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
488 .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
489 },
490};
491
492enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req)
493{
494 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
495
496 scic_sds_stp_non_ncq_request_construct(sci_req);
497
498 /* Build the STP task context structure */
499 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
500
501 sci_base_state_machine_construct(&sci_req->started_substate_machine,
502 sci_req,
503 scic_sds_stp_request_started_non_data_substate_table,
504 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE);
505
506 return SCI_SUCCESS;
507}
508
509#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
510
511/* transmit DATA_FIS from (current sgl + offset) for input
512 * parameter length. current sgl and offset is alreay stored in the IO request
513 */
514static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
515 struct scic_sds_request *sci_req,
516 u32 length)
517{
518 struct scic_sds_controller *scic = sci_req->owning_controller;
519 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
520 struct scu_task_context *task_context;
521 struct scu_sgl_element *current_sgl;
522
523 /* Recycle the TC and reconstruct it for sending out DATA FIS containing
524 * for the data from current_sgl+offset for the input length
525 */
526 task_context = scic_sds_controller_get_task_context_buffer(scic,
527 sci_req->io_tag);
528
529 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
530 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
531 else
532 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
533
534 /* update the TC */
535 task_context->command_iu_upper = current_sgl->address_upper;
536 task_context->command_iu_lower = current_sgl->address_lower;
537 task_context->transfer_length_bytes = length;
538 task_context->type.stp.fis_type = FIS_DATA;
539
540 /* send the new TC out. */
541 return scic_controller_continue_io(sci_req);
542}
543
544static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
545{
546
547 struct scu_sgl_element *current_sgl;
548 u32 sgl_offset;
549 u32 remaining_bytes_in_current_sgl = 0;
550 enum sci_status status = SCI_SUCCESS;
551 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
552
553 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
554
555 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
556 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
557 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
558 } else {
559 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
560 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
561 }
562
563
564 if (stp_req->type.pio.pio_transfer_bytes > 0) {
565 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
566 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
567 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
568 if (status == SCI_SUCCESS) {
569 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
570
571 /* update the current sgl, sgl_offset and save for future */
572 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
573 sgl_offset = 0;
574 }
575 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
576 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
577 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
578
579 if (status == SCI_SUCCESS) {
580 /* Sgl offset will be adjusted and saved for future */
581 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
582 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
583 stp_req->type.pio.pio_transfer_bytes = 0;
584 }
585 }
586 }
587
588 if (status == SCI_SUCCESS) {
589 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
590 }
591
592 return status;
593}
594
595/**
596 *
597 * @stp_request: The request that is used for the SGL processing.
598 * @data_buffer: The buffer of data to be copied.
599 * @length: The length of the data transfer.
600 *
601 * Copy the data from the buffer for the length specified to the IO reqeust SGL
602 * specified data region. enum sci_status
603 */
604static enum sci_status
605scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
606 u8 *data_buf, u32 len)
607{
608 struct scic_sds_request *sci_req;
609 struct isci_request *ireq;
610 u8 *src_addr;
611 int copy_len;
612 struct sas_task *task;
613 struct scatterlist *sg;
614 void *kaddr;
615 int total_len = len;
616
617 sci_req = to_sci_req(stp_req);
618 ireq = sci_req_to_ireq(sci_req);
619 task = isci_request_access_task(ireq);
620 src_addr = data_buf;
621
622 if (task->num_scatter > 0) {
623 sg = task->scatter;
624
625 while (total_len > 0) {
626 struct page *page = sg_page(sg);
627
628 copy_len = min_t(int, total_len, sg_dma_len(sg));
629 kaddr = kmap_atomic(page, KM_IRQ0);
630 memcpy(kaddr + sg->offset, src_addr, copy_len);
631 kunmap_atomic(kaddr, KM_IRQ0);
632 total_len -= copy_len;
633 src_addr += copy_len;
634 sg = sg_next(sg);
635 }
636 } else {
637 BUG_ON(task->total_xfer_len < total_len);
638 memcpy(task->scatter, src_addr, total_len);
639 }
640
641 return SCI_SUCCESS;
642}
643
644/**
645 *
646 * @sci_req: The PIO DATA IN request that is to receive the data.
647 * @data_buffer: The buffer to copy from.
648 *
649 * Copy the data buffer to the io request data region. enum sci_status
650 */
651static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
652 struct scic_sds_stp_request *sci_req,
653 u8 *data_buffer)
654{
655 enum sci_status status;
656
657 /*
658 * If there is less than 1K remaining in the transfer request
659 * copy just the data for the transfer */
660 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
661 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
662 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
663
664 if (status == SCI_SUCCESS)
665 sci_req->type.pio.pio_transfer_bytes = 0;
666 } else {
667 /* We are transfering the whole frame so copy */
668 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
669 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
670
671 if (status == SCI_SUCCESS)
672 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
673 }
674
675 return status;
676}
677
678/**
679 *
680 * @sci_req:
681 * @completion_code:
682 *
683 * enum sci_status
684 */
685static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
686 struct scic_sds_request *sci_req,
687 u32 completion_code)
688{
689 enum sci_status status = SCI_SUCCESS;
690
691 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
692 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
693 scic_sds_request_set_status(
694 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
695 );
696
697 sci_base_state_machine_change_state(
698 &sci_req->started_substate_machine,
699 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
700 );
701 break;
702
703 default:
704 /*
705 * All other completion status cause the IO to be complete. If a NAK
706 * was received, then it is up to the user to retry the request. */
707 scic_sds_request_set_status(
708 sci_req,
709 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
710 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
711 );
712
713 sci_base_state_machine_change_state(
714 &sci_req->state_machine,
715 SCI_BASE_REQUEST_STATE_COMPLETED
716 );
717 break;
718 }
719
720 return status;
721}
722
723static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
724 u32 frame_index)
725{
726 struct scic_sds_controller *scic = sci_req->owning_controller;
727 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
728 struct isci_request *ireq = sci_req_to_ireq(sci_req);
729 struct sas_task *task = isci_request_access_task(ireq);
730 struct dev_to_host_fis *frame_header;
731 enum sci_status status;
732 u32 *frame_buffer;
733
734 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
735 frame_index,
736 (void **)&frame_header);
737
738 if (status != SCI_SUCCESS) {
739 dev_err(scic_to_dev(scic),
740 "%s: SCIC IO Request 0x%p could not get frame header "
741 "for frame index %d, status %x\n",
742 __func__, stp_req, frame_index, status);
743 return status;
744 }
745
746 switch (frame_header->fis_type) {
747 case FIS_PIO_SETUP:
748 /* Get from the frame buffer the PIO Setup Data */
749 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
750 frame_index,
751 (void **)&frame_buffer);
752
753 /* Get the data from the PIO Setup The SCU Hardware returns
754 * first word in the frame_header and the rest of the data is in
755 * the frame buffer so we need to back up one dword
756 */
757
758 /* transfer_count: first 16bits in the 4th dword */
759 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
760
761 /* ending_status: 4th byte in the 3rd dword */
762 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
763
764 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
765 frame_header,
766 frame_buffer);
767
768 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
769
770 /* The next state is dependent on whether the
771 * request was PIO Data-in or Data out
772 */
773 if (task->data_dir == DMA_FROM_DEVICE) {
774 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
775 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
776 } else if (task->data_dir == DMA_TO_DEVICE) {
777 /* Transmit data */
778 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
779 if (status != SCI_SUCCESS)
780 break;
781 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
782 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
783 }
784 break;
785 case FIS_SETDEVBITS:
786 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
787 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
788 break;
789 case FIS_REGD2H:
790 if (frame_header->status & ATA_BUSY) {
791 /* Now why is the drive sending a D2H Register FIS when
792 * it is still busy? Do nothing since we are still in
793 * the right state.
794 */
795 dev_dbg(scic_to_dev(scic),
796 "%s: SCIC PIO Request 0x%p received "
797 "D2H Register FIS with BSY status "
798 "0x%x\n", __func__, stp_req,
799 frame_header->status);
800 break;
801 }
802
803 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
804 frame_index,
805 (void **)&frame_buffer);
806
807 scic_sds_controller_copy_sata_response(&sci_req->stp.req,
808 frame_header,
809 frame_buffer);
810
811 scic_sds_request_set_status(sci_req,
812 SCU_TASK_DONE_CHECK_RESPONSE,
813 SCI_FAILURE_IO_RESPONSE_VALID);
814
815 sci_base_state_machine_change_state(&sci_req->state_machine,
816 SCI_BASE_REQUEST_STATE_COMPLETED);
817 break;
818 default:
819 /* FIXME: what do we do here? */
820 break;
821 }
822
823 /* Frame is decoded return it to the controller */
824 scic_sds_controller_release_frame(scic, frame_index);
825
826 return status;
827}
828
/**
 * scic_sds_stp_request_pio_data_in_await_data_frame_handler() - DATA FIS rx
 * @sci_req: the PIO DATA IN request receiving the frame.
 * @frame_index: the unsolicited frame index of the received frame.
 *
 * Handles a frame received while awaiting PIO DATA IN payload.  A DATA FIS
 * is copied into the request's SG region (or, when no SGL pair is set up,
 * the frame index is saved for later consumption).  When the transfer count
 * reaches zero the request either completes (ending_status shows the device
 * is no longer busy) or goes back to waiting for the next frame.  Any other
 * FIS type completes the request with an abort-required status.
 *
 * Return: the status of the header lookup / data copy.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
										 u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	struct sata_fis_data *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	if (frame_header->fis_type == FIS_DATA) {
		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
			/* No SG destination: the frame is deliberately NOT
			 * released here -- presumably held via
			 * saved_rx_frame_index until the user consumes it;
			 * TODO(review): confirm the release path. */
			sci_req->saved_rx_frame_index = frame_index;
			stp_req->type.pio.pio_transfer_bytes = 0;
		} else {
			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&frame_buffer);

			status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
									    (u8 *)frame_buffer);

			/* Frame is decoded return it to the controller */
			scic_sds_controller_release_frame(scic, frame_index);
		}

		/* Check for the end of the transfer, are there more
		 * bytes remaining for this data transfer
		 */
		if (status != SCI_SUCCESS ||
		    stp_req->type.pio.pio_transfer_bytes != 0)
			return status;

		if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
			/* ending_status (saved from the PIO SETUP FIS) shows
			 * the device is done: report it and complete. */
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);

			sci_base_state_machine_change_state(&sci_req->state_machine,
							    SCI_BASE_REQUEST_STATE_COMPLETED);
		} else {
			/* Device still busy: wait for the next frame. */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
							    SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
		}
	} else {
		dev_err(scic_to_dev(scic),
			"%s: SCIC PIO Request 0x%p received frame %d "
			"with fis type 0x%02x when expecting a data "
			"fis.\n", __func__, stp_req, frame_index,
			frame_header->fis_type);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);

		/* Frame is decoded return it to the controller */
		scic_sds_controller_release_frame(scic, frame_index);
	}

	return status;
}
904
905
906/**
907 *
908 * @sci_req:
909 * @completion_code:
910 *
911 * enum sci_status
912 */
913static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
914
915 struct scic_sds_request *sci_req,
916 u32 completion_code)
917{
918 enum sci_status status = SCI_SUCCESS;
919 bool all_frames_transferred = false;
920 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
921
922 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
923 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
924 /* Transmit data */
925 if (stp_req->type.pio.pio_transfer_bytes != 0) {
926 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
927 if (status == SCI_SUCCESS) {
928 if (stp_req->type.pio.pio_transfer_bytes == 0)
929 all_frames_transferred = true;
930 }
931 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
932 /*
933 * this will happen if the all data is written at the
934 * first time after the pio setup fis is received
935 */
936 all_frames_transferred = true;
937 }
938
939 /* all data transferred. */
940 if (all_frames_transferred) {
941 /*
942 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
943 * and wait for PIO_SETUP fis / or D2H REg fis. */
944 sci_base_state_machine_change_state(
945 &sci_req->started_substate_machine,
946 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
947 );
948 }
949 break;
950
951 default:
952 /*
953 * All other completion status cause the IO to be complete. If a NAK
954 * was received, then it is up to the user to retry the request. */
955 scic_sds_request_set_status(
956 sci_req,
957 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
958 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
959 );
960
961 sci_base_state_machine_change_state(
962 &sci_req->state_machine,
963 SCI_BASE_REQUEST_STATE_COMPLETED
964 );
965 break;
966 }
967
968 return status;
969}
970
971/**
972 *
973 * @request: This is the request which is receiving the event.
974 * @event_code: This is the event code that the request on which the request is
975 * expected to take action.
976 *
977 * This method will handle any link layer events while waiting for the data
978 * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
979 */
980static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
981 struct scic_sds_request *request,
982 u32 event_code)
983{
984 enum sci_status status;
985
986 switch (scu_get_event_specifier(event_code)) {
987 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
988 /*
989 * We are waiting for data and the SCU has R_ERR the data frame.
990 * Go back to waiting for the D2H Register FIS */
991 sci_base_state_machine_change_state(
992 &request->started_substate_machine,
993 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
994 );
995
996 status = SCI_SUCCESS;
997 break;
998
999 default:
1000 dev_err(scic_to_dev(request->owning_controller),
1001 "%s: SCIC PIO Request 0x%p received unexpected "
1002 "event 0x%08x\n",
1003 __func__, request, event_code);
1004
1005 /* / @todo Should we fail the PIO request when we get an unexpected event? */
1006 status = SCI_FAILURE;
1007 break;
1008 }
1009
1010 return status;
1011}
1012
1013/* --------------------------------------------------------------------------- */
1014
1015static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = {
1016 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
1017 .abort_handler = scic_sds_request_started_state_abort_handler,
1018 .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
1019 },
1020 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
1021 .abort_handler = scic_sds_request_started_state_abort_handler,
1022 .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
1023 },
1024 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
1025 .abort_handler = scic_sds_request_started_state_abort_handler,
1026 .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
1027 .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
1028 },
1029 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
1030 .abort_handler = scic_sds_request_started_state_abort_handler,
1031 .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
1032 }
1033};
1034
1035static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
1036 void *object)
1037{
1038 struct scic_sds_request *sci_req = object;
1039
1040 SET_STATE_HANDLER(
1041 sci_req,
1042 scic_sds_stp_request_started_pio_substate_handler_table,
1043 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
1044 );
1045
1046 scic_sds_remote_device_set_working_request(
1047 sci_req->target_device, sci_req);
1048}
1049
1050static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
1051{
1052 struct scic_sds_request *sci_req = object;
1053
1054 SET_STATE_HANDLER(
1055 sci_req,
1056 scic_sds_stp_request_started_pio_substate_handler_table,
1057 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1058 );
1059}
1060
1061static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
1062 void *object)
1063{
1064 struct scic_sds_request *sci_req = object;
1065
1066 SET_STATE_HANDLER(
1067 sci_req,
1068 scic_sds_stp_request_started_pio_substate_handler_table,
1069 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
1070 );
1071}
1072
1073static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
1074 void *object)
1075{
1076 struct scic_sds_request *sci_req = object;
1077
1078 SET_STATE_HANDLER(
1079 sci_req,
1080 scic_sds_stp_request_started_pio_substate_handler_table,
1081 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
1082 );
1083}
1084
1085/* --------------------------------------------------------------------------- */
1086
1087static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = {
1088 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
1089 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
1090 },
1091 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
1092 .enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
1093 },
1094 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
1095 .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
1096 },
1097 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
1098 .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
1099 }
1100};
1101
/**
 * scic_sds_stp_pio_request_construct() - build an STP PIO request
 * @sci_req: the request to be constructed.
 * @copy_rx_frame: when true, build an SGL and copy received data through it;
 *	when false, leave the SGL pair NULL so rx frames are left for the
 *	caller to consume.
 *
 * Builds the raw-frame task context, resets the PIO bookkeeping, and starts
 * the PIO substate machine in the await-H2D-completion substate.
 *
 * Return: SCI_SUCCESS - construction cannot currently fail.
 */
enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scic_sds_stp_non_ncq_request_construct(sci_req);

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	/* NOTE(review): this clears current_transfer_bytes while the frame
	 * handlers operate on pio_transfer_bytes (set from the PIO SETUP
	 * frame) -- confirm both fields are intended to coexist. */
	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data
		 * as the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	sci_base_state_machine_construct(&sci_req->started_substate_machine,
					 sci_req,
					 scic_sds_stp_request_started_pio_substate_table,
					 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE);

	return SCI_SUCCESS;
}
1139
1140static void scic_sds_stp_request_udma_complete_request(
1141 struct scic_sds_request *request,
1142 u32 scu_status,
1143 enum sci_status sci_status)
1144{
1145 scic_sds_request_set_status(request, scu_status, sci_status);
1146 sci_base_state_machine_change_state(&request->state_machine,
1147 SCI_BASE_REQUEST_STATE_COMPLETED);
1148}
1149
1150static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
1151 u32 frame_index)
1152{
1153 struct scic_sds_controller *scic = sci_req->owning_controller;
1154 struct dev_to_host_fis *frame_header;
1155 enum sci_status status;
1156 u32 *frame_buffer;
1157
1158 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1159 frame_index,
1160 (void **)&frame_header);
1161
1162 if ((status == SCI_SUCCESS) &&
1163 (frame_header->fis_type == FIS_REGD2H)) {
1164 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1165 frame_index,
1166 (void **)&frame_buffer);
1167
1168 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1169 frame_header,
1170 frame_buffer);
1171 }
1172
1173 scic_sds_controller_release_frame(scic, frame_index);
1174
1175 return status;
1176}
1177
/**
 * scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler() -
 * @sci_req: the UDMA request whose task context completed.
 * @completion_code: the SCU completion code for the TC.
 *
 * Completes the request on a good status.  For UNEXP_FIS/REG_ERR, decides --
 * based on whether a D2H register FIS already landed in the response buffer
 * -- whether to finish with a device error now or wait for the D2H FIS in
 * the next substate.  Link-level errors suspend the device before failing.
 *
 * Return: SCI_SUCCESS - always (errors are reported via request status).
 */
static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_TASK_DONE_GOOD,
							   SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/*
		 * We must check the response buffer to see if the D2H Register FIS was
		 * received before we got the TC completion. */
		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
			scic_sds_remote_device_suspend(sci_req->target_device,
				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));

			scic_sds_stp_request_udma_complete_request(sci_req,
								   SCU_TASK_DONE_CHECK_RESPONSE,
								   SCI_FAILURE_IO_RESPONSE_VALID);
		} else {
			/*
			 * If we have an error completion status for the TC then we can expect a
			 * D2H register FIS from the device so we must change state to wait for it */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
				SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
		}
		break;

	/*
	 * / @todo Check to see if any of these completion status need to wait for
	 * / the device to host register fis. */
	/* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
		/* Suspend the device on link-level errors before failing. */
		scic_sds_remote_device_suspend(sci_req->target_device,
			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
	/* Fall through to the default case */
	default:
		/* All other completion status cause the IO to be complete. */
		scic_sds_stp_request_udma_complete_request(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	return status;
}
1233
1234static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
1235 struct scic_sds_request *sci_req,
1236 u32 frame_index)
1237{
1238 enum sci_status status;
1239
1240 /* Use the general frame handler to copy the resposne data */
1241 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
1242
1243 if (status != SCI_SUCCESS)
1244 return status;
1245
1246 scic_sds_stp_request_udma_complete_request(sci_req,
1247 SCU_TASK_DONE_CHECK_RESPONSE,
1248 SCI_FAILURE_IO_RESPONSE_VALID);
1249
1250 return status;
1251}
1252
1253/* --------------------------------------------------------------------------- */
1254
/* Per-substate handlers for the UDMA 'started' substate machine. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = {
	/* Waiting for the task-context completion; unsolicited frames may
	 * still arrive and are handled by the general frame handler. */
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
		.frame_handler		= scic_sds_stp_request_udma_general_frame_handler,
	},
	/* TC completed with error; only a D2H register FIS frame is expected. */
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.frame_handler		= scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
	},
};
1266
1267static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
1268 void *object)
1269{
1270 struct scic_sds_request *sci_req = object;
1271
1272 SET_STATE_HANDLER(
1273 sci_req,
1274 scic_sds_stp_request_started_udma_substate_handler_table,
1275 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
1276 );
1277}
1278
1279/**
1280 *
1281 *
1282 * This state is entered when there is an TC completion failure. The hardware
1283 * received an unexpected condition while processing the IO request and now
1284 * will UF the D2H register FIS to complete the IO.
1285 */
1286static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
1287 void *object)
1288{
1289 struct scic_sds_request *sci_req = object;
1290
1291 SET_STATE_HANDLER(
1292 sci_req,
1293 scic_sds_stp_request_started_udma_substate_handler_table,
1294 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
1295 );
1296}
1297
1298/* --------------------------------------------------------------------------- */
1299
/* Substate table wiring each UDMA substate to its enter action. */
static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
	},
};
1308
1309enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
1310 u32 len,
1311 enum dma_data_direction dir)
1312{
1313 scic_sds_stp_non_ncq_request_construct(sci_req);
1314
1315 scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN,
1316 len, dir);
1317
1318 sci_base_state_machine_construct(
1319 &sci_req->started_substate_machine,
1320 sci_req,
1321 scic_sds_stp_request_started_udma_substate_table,
1322 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
1323 );
1324
1325 return SCI_SUCCESS;
1326}
1327
1328/**
1329 *
1330 * @sci_req:
1331 * @completion_code:
1332 *
1333 * This method processes a TC completion. The expected TC completion is for
1334 * the transmission of the H2D register FIS containing the SATA/STP non-data
1335 * request. This method always successfully processes the TC completion.
1336 * SCI_SUCCESS This value is always returned.
1337 */
1338static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
1339 struct scic_sds_request *sci_req,
1340 u32 completion_code)
1341{
1342 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1343 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1344 scic_sds_request_set_status(
1345 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1346 );
1347
1348 sci_base_state_machine_change_state(
1349 &sci_req->started_substate_machine,
1350 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
1351 );
1352 break;
1353
1354 default:
1355 /*
1356 * All other completion status cause the IO to be complete. If a NAK
1357 * was received, then it is up to the user to retry the request. */
1358 scic_sds_request_set_status(
1359 sci_req,
1360 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1361 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1362 );
1363
1364 sci_base_state_machine_change_state(
1365 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
1366 break;
1367 }
1368
1369 return SCI_SUCCESS;
1370}
1371
1372/**
1373 *
1374 * @sci_req:
1375 * @completion_code:
1376 *
1377 * This method processes a TC completion. The expected TC completion is for
1378 * the transmission of the H2D register FIS containing the SATA/STP non-data
1379 * request. This method always successfully processes the TC completion.
1380 * SCI_SUCCESS This value is always returned.
1381 */
1382static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
1383 struct scic_sds_request *sci_req,
1384 u32 completion_code)
1385{
1386 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1387 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1388 scic_sds_request_set_status(
1389 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1390 );
1391
1392 sci_base_state_machine_change_state(
1393 &sci_req->started_substate_machine,
1394 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1395 );
1396 break;
1397
1398 default:
1399 /*
1400 * All other completion status cause the IO to be complete. If a NAK
1401 * was received, then it is up to the user to retry the request. */
1402 scic_sds_request_set_status(
1403 sci_req,
1404 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1405 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1406 );
1407
1408 sci_base_state_machine_change_state(&sci_req->state_machine,
1409 SCI_BASE_REQUEST_STATE_COMPLETED);
1410 break;
1411 }
1412
1413 return SCI_SUCCESS;
1414}
1415
/**
 * scic_sds_stp_request_soft_reset_await_d2h_frame_handler()
 * @sci_req: This parameter specifies the request for which a frame has been
 *    received.
 * @frame_index: This parameter specifies the index of the frame that has been
 *    received.
 *
 * This method processes frames received from the target while waiting for a
 * device to host register FIS. If a non-register FIS is received during this
 * time, it is treated as a protocol violation from an IO perspective.
 * Returns the status of the frame-header lookup; the request is moved to the
 * completed state and the frame is released whenever a header was obtained.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);
	if (status != SCI_SUCCESS) {
		/* No header, nothing to decode (and no frame is released here). */
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		/* Expected register FIS: copy it into the response buffer. */
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;

	default:
		/* Any other FIS type here is a protocol violation. */
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			 "violation occurred\n", __func__, stp_req,
			 frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
					    SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	/* Either way the soft reset request is now done. */
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
1484
1485/* --------------------------------------------------------------------------- */
1486
/* Per-substate handlers for the soft-reset 'started' substate machine:
 * two TC-completion phases (SRST assert, then de-assert) followed by a
 * frame phase awaiting the D2H response. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.frame_handler		= scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
	},
};
1501
1502static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
1503 void *object)
1504{
1505 struct scic_sds_request *sci_req = object;
1506
1507 SET_STATE_HANDLER(
1508 sci_req,
1509 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1510 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
1511 );
1512
1513 scic_sds_remote_device_set_working_request(
1514 sci_req->target_device, sci_req
1515 );
1516}
1517
/*
 * Entry action for the SRST de-assert substate: rewrite the H2D FIS with
 * SRST cleared, clear the control-frame bit in the task context, and
 * restart the IO so the second (de-assert) FIS is transmitted.
 */
static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scu_task_context *task_context;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit */
	h2d_fis = &sci_req->stp.cmd;
	h2d_fis->control = 0;

	/* Clear the TC control bit */
	task_context = scic_sds_controller_get_task_context_buffer(
		sci_req->owning_controller, sci_req->io_tag);
	task_context->control_frame = 0;

	status = scic_controller_continue_io(sci_req);
	if (status == SCI_SUCCESS) {
		SET_STATE_HANDLER(
			sci_req,
			scic_sds_stp_request_started_soft_reset_substate_handler_table,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
	}
	/* NOTE(review): if scic_controller_continue_io() fails, the state
	 * handlers are not updated and the failure is silently dropped here —
	 * presumably handled elsewhere; verify against callers. */
}
1544
1545static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
1546 void *object)
1547{
1548 struct scic_sds_request *sci_req = object;
1549
1550 SET_STATE_HANDLER(
1551 sci_req,
1552 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1553 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1554 );
1555}
1556
/* Substate table wiring each soft-reset substate to its enter action. */
static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
	},
};
1568
1569enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req)
1570{
1571 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1572
1573 scic_sds_stp_non_ncq_request_construct(sci_req);
1574
1575 /* Build the STP task context structure */
1576 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
1577
1578 sci_base_state_machine_construct(&sci_req->started_substate_machine,
1579 sci_req,
1580 scic_sds_stp_request_started_soft_reset_substate_table,
1581 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
1582
1583 return SCI_SUCCESS;
1584}
diff --git a/drivers/scsi/isci/stp_request.h b/drivers/scsi/isci/stp_request.h
deleted file mode 100644
index eb14874ceda3..000000000000
--- a/drivers/scsi/isci/stp_request.h
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCIC_SDS_STP_REQUEST_T_
57#define _SCIC_SDS_STP_REQUEST_T_
58
59#include <linux/dma-mapping.h>
60#include <scsi/sas.h>
61
/*
 * STP/SATA protocol-specific request state; one union member is live per
 * transfer mode (NCQ, UDMA, PIO, or packet).
 */
struct scic_sds_stp_request {
	union {
		u32 ncq;

		u32 udma;

		struct scic_sds_stp_pio_request {
			/**
			 * Total transfer for the entire PIO request recorded at request
			 * construction time.
			 *
			 * @todo Should we just decrement this value for each byte of data
			 * transmitted or received to eliminate the current_transfer_bytes
			 * field?
			 */
			u32 total_transfer_bytes;

			/**
			 * Total number of bytes received/transmitted in data frames since the start
			 * of the IO request.  At the end of the IO request this should equal the
			 * total_transfer_bytes.
			 */
			u32 current_transfer_bytes;

			/**
			 * The number of bytes requested in the PIO setup.
			 */
			u32 pio_transfer_bytes;

			/**
			 * PIO Setup ending status value to tell us if we need to wait for another FIS
			 * or if the transfer is complete.  On the receipt of a D2H FIS this will be
			 * the status field of that FIS.
			 */
			u8 ending_status;

			/**
			 * On receipt of a D2H FIS this will be the ending error field if the
			 * ending_status has the SATA_STATUS_ERR bit set.
			 */
			u8 ending_error;

			/* Cursor over the request's SGL while feeding PIO data frames. */
			struct scic_sds_request_pio_sgl {
				struct scu_sgl_element_pair *sgl_pair;
				u8 sgl_set;
				u32 sgl_offset;
			} request_current;
		} pio;

		struct {
			/**
			 * The number of bytes requested in the PIO setup before CDB data frame.
			 */
			u32 device_preferred_cdb_length;
		} packet;
	} type;
};
118
/**
 * enum scic_sds_stp_request_started_udma_substates - This enumeration depicts
 *    the various sub-states associated with a SATA/STP UDMA protocol
 *    operation.
 */
enum scic_sds_stp_request_started_udma_substates {
	/* Waiting for the task-context completion of the UDMA transfer. */
	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE,
	/* TC completed with error; waiting for the D2H register FIS. */
	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE,
};
129
/**
 * enum scic_sds_stp_request_started_non_data_substates - This enumeration
 *    depicts the various sub-states associated with a SATA/STP non-data
 *    protocol operation.
 */
enum scic_sds_stp_request_started_non_data_substates {
	/* Waiting for the TC completion of the transmitted H2D register FIS. */
	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE,
	/* Waiting for the D2H register FIS from the device. */
	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE,
};
141
/**
 * enum scic_sds_stp_request_started_soft_reset_substates - This enumeration
 *    depicts the various sub-states associated with a SATA/STP soft reset
 *    operation.
 */
enum scic_sds_stp_request_started_soft_reset_substates {
	/* Waiting for TC completion of the H2D FIS that asserts SRST. */
	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE,
	/* Waiting for TC completion of the H2D FIS that de-asserts SRST. */
	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE,
	/* Waiting for the D2H response frame from the device. */
	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE,
};
154
/* This is the enumeration of the SATA PIO DATA IN started substate machine. */
enum _scic_sds_stp_request_started_pio_substates {
	/**
	 * While in this state the IO request object is waiting for the TC completion
	 * notification for the H2D Register FIS
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting for either a PIO Setup
	 * FIS or a D2H register FIS.  The type of frame received is based on the
	 * result of the prior frame and line conditions.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting for a DATA frame from
	 * the device.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting to transmit the next data
	 * frame to the device.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE,
};
182
struct scic_sds_request;

/* Constructors for the per-protocol STP request variants; each initializes
 * the request's task context and started substate machine. */
enum sci_status scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
						   bool copy_rx_frame);
enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
						    u32 transfer_length,
						    enum dma_data_direction dir);
enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req);
enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req);
enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
						   u32 transfer_length,
						   enum dma_data_direction dir);
195#endif /* _SCIC_SDS_STP_REQUEST_T_ */