aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/isci/request.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi/isci/request.c')
-rw-r--r--drivers/scsi/isci/request.c1605
1 files changed, 1476 insertions, 129 deletions
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 5201dc58a191..f503e3e18d8f 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -58,6 +58,7 @@
58#include "request.h" 58#include "request.h"
59#include "sata.h" 59#include "sata.h"
60#include "scu_completion_codes.h" 60#include "scu_completion_codes.h"
61#include "scu_event_codes.h"
61#include "sas.h" 62#include "sas.h"
62 63
63/** 64/**
@@ -92,7 +93,7 @@ static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
92 * the Scatter-Gather List. 93 * the Scatter-Gather List.
93 * 94 *
94 */ 95 */
95void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) 96static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
96{ 97{
97 struct isci_request *isci_request = sci_req_to_ireq(sds_request); 98 struct isci_request *isci_request = sci_req_to_ireq(sds_request);
98 struct isci_host *isci_host = isci_request->isci_host; 99 struct isci_host *isci_host = isci_request->isci_host;
@@ -366,27 +367,214 @@ static void scu_ssp_task_request_construct_task_context(
366 sizeof(struct ssp_task_iu) / sizeof(u32); 367 sizeof(struct ssp_task_iu) / sizeof(u32);
367} 368}
368 369
370/**
371 * This method is will fill in the SCU Task Context for any type of SATA
372 * request. This is called from the various SATA constructors.
373 * @sci_req: The general IO request object which is to be used in
374 * constructing the SCU task context.
375 * @task_context: The buffer pointer for the SCU task context which is being
376 * constructed.
377 *
378 * The general io request construction is complete. The buffer assignment for
379 * the command buffer is complete. none Revisit task context construction to
380 * determine what is common for SSP/SMP/STP task context structures.
381 */
382static void scu_sata_reqeust_construct_task_context(
383 struct scic_sds_request *sci_req,
384 struct scu_task_context *task_context)
385{
386 dma_addr_t dma_addr;
387 struct scic_sds_controller *controller;
388 struct scic_sds_remote_device *target_device;
389 struct scic_sds_port *target_port;
390
391 controller = scic_sds_request_get_controller(sci_req);
392 target_device = scic_sds_request_get_device(sci_req);
393 target_port = scic_sds_request_get_port(sci_req);
394
395 /* Fill in the TC with the its required data */
396 task_context->abort = 0;
397 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
398 task_context->initiator_request = 1;
399 task_context->connection_rate = target_device->connection_rate;
400 task_context->protocol_engine_index =
401 scic_sds_controller_get_protocol_engine_group(controller);
402 task_context->logical_port_index =
403 scic_sds_port_get_index(target_port);
404 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
405 task_context->valid = SCU_TASK_CONTEXT_VALID;
406 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
407
408 task_context->remote_node_index =
409 scic_sds_remote_device_get_index(sci_req->target_device);
410 task_context->command_code = 0;
411
412 task_context->link_layer_control = 0;
413 task_context->do_not_dma_ssp_good_response = 1;
414 task_context->strict_ordering = 0;
415 task_context->control_frame = 0;
416 task_context->timeout_enable = 0;
417 task_context->block_guard_enable = 0;
418
419 task_context->address_modifier = 0;
420 task_context->task_phase = 0x01;
421
422 task_context->ssp_command_iu_length =
423 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
424
425 /* Set the first word of the H2D REG FIS */
426 task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
427
428 if (sci_req->was_tag_assigned_by_user) {
429 /*
430 * Build the task context now since we have already read
431 * the data
432 */
433 sci_req->post_context =
434 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
435 (scic_sds_controller_get_protocol_engine_group(
436 controller) <<
437 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
438 (scic_sds_port_get_index(target_port) <<
439 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
440 scic_sds_io_tag_get_index(sci_req->io_tag));
441 } else {
442 /*
443 * Build the task context now since we have already read
444 * the data.
445 * I/O tag index is not assigned because we have to wait
446 * until we get a TCi.
447 */
448 sci_req->post_context =
449 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
450 (scic_sds_controller_get_protocol_engine_group(
451 controller) <<
452 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
453 (scic_sds_port_get_index(target_port) <<
454 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
455 }
456
457 /*
458 * Copy the physical address for the command buffer to the SCU Task
459 * Context. We must offset the command buffer by 4 bytes because the
460 * first 4 bytes are transfered in the body of the TC.
461 */
462 dma_addr = scic_io_request_get_dma_addr(sci_req,
463 ((char *) &sci_req->stp.cmd) +
464 sizeof(u32));
465
466 task_context->command_iu_upper = upper_32_bits(dma_addr);
467 task_context->command_iu_lower = lower_32_bits(dma_addr);
468
469 /* SATA Requests do not have a response buffer */
470 task_context->response_iu_upper = 0;
471 task_context->response_iu_lower = 0;
472}
473
474
369 475
370/** 476/**
371 * This method constructs the SSP Command IU data for this ssp passthrough 477 * scu_stp_raw_request_construct_task_context -
372 * comand request object. 478 * @sci_req: This parameter specifies the STP request object for which to
373 * @sci_req: This parameter specifies the request object for which the SSP 479 * construct a RAW command frame task context.
374 * command information unit is being built. 480 * @task_context: This parameter specifies the SCU specific task context buffer
481 * to construct.
375 * 482 *
376 * enum sci_status, returns invalid parameter is cdb > 16 483 * This method performs the operations common to all SATA/STP requests
484 * utilizing the raw frame method. none
377 */ 485 */
486static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
487 struct scu_task_context *task_context)
488{
489 struct scic_sds_request *sci_req = to_sci_req(stp_req);
490
491 scu_sata_reqeust_construct_task_context(sci_req, task_context);
492
493 task_context->control_frame = 0;
494 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
495 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
496 task_context->type.stp.fis_type = FIS_REGH2D;
497 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
498}
499
500static enum sci_status
501scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
502 bool copy_rx_frame)
503{
504 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
505 struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;
506
507 scu_stp_raw_request_construct_task_context(stp_req,
508 sci_req->task_context_buffer);
509
510 pio->current_transfer_bytes = 0;
511 pio->ending_error = 0;
512 pio->ending_status = 0;
378 513
514 pio->request_current.sgl_offset = 0;
515 pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;
516
517 if (copy_rx_frame) {
518 scic_sds_request_build_sgl(sci_req);
519 /* Since the IO request copy of the TC contains the same data as
520 * the actual TC this pointer is vaild for either.
521 */
522 pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
523 } else {
524 /* The user does not want the data copied to the SGL buffer location */
525 pio->request_current.sgl_pair = NULL;
526 }
527
528 return SCI_SUCCESS;
529}
379 530
380/** 531/**
381 * This method constructs the SATA request object.
382 * @sci_req:
383 * @sat_protocol:
384 * @transfer_length:
385 * @data_direction:
386 * @copy_rx_frame:
387 * 532 *
388 * enum sci_status 533 * @sci_req: This parameter specifies the request to be constructed as an
534 * optimized request.
535 * @optimized_task_type: This parameter specifies whether the request is to be
536 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
537 * value of 1 indicates NCQ.
538 *
539 * This method will perform request construction common to all types of STP
540 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
541 * returns an indication as to whether the construction was successful.
389 */ 542 */
543static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
544 u8 optimized_task_type,
545 u32 len,
546 enum dma_data_direction dir)
547{
548 struct scu_task_context *task_context = sci_req->task_context_buffer;
549
550 /* Build the STP task context structure */
551 scu_sata_reqeust_construct_task_context(sci_req, task_context);
552
553 /* Copy over the SGL elements */
554 scic_sds_request_build_sgl(sci_req);
555
556 /* Copy over the number of bytes to be transfered */
557 task_context->transfer_length_bytes = len;
558
559 if (dir == DMA_TO_DEVICE) {
560 /*
561 * The difference between the DMA IN and DMA OUT request task type
562 * values are consistent with the difference between FPDMA READ
563 * and FPDMA WRITE values. Add the supplied task type parameter
564 * to this difference to set the task type properly for this
565 * DATA OUT (WRITE) case. */
566 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
567 - SCU_TASK_TYPE_DMA_IN);
568 } else {
569 /*
570 * For the DATA IN (READ) case, simply save the supplied
571 * optimized task type. */
572 task_context->task_type = optimized_task_type;
573 }
574}
575
576
577
390static enum sci_status 578static enum sci_status
391scic_io_request_construct_sata(struct scic_sds_request *sci_req, 579scic_io_request_construct_sata(struct scic_sds_request *sci_req,
392 u32 len, 580 u32 len,
@@ -402,9 +590,11 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req,
402 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 590 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
403 591
404 if (tmf->tmf_code == isci_tmf_sata_srst_high || 592 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
405 tmf->tmf_code == isci_tmf_sata_srst_low) 593 tmf->tmf_code == isci_tmf_sata_srst_low) {
406 return scic_sds_stp_soft_reset_request_construct(sci_req); 594 scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
407 else { 595 sci_req->task_context_buffer);
596 return SCI_SUCCESS;
597 } else {
408 dev_err(scic_to_dev(sci_req->owning_controller), 598 dev_err(scic_to_dev(sci_req->owning_controller),
409 "%s: Request 0x%p received un-handled SAT " 599 "%s: Request 0x%p received un-handled SAT "
410 "management protocol 0x%x.\n", 600 "management protocol 0x%x.\n",
@@ -424,17 +614,27 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req,
424 } 614 }
425 615
426 /* non data */ 616 /* non data */
427 if (task->data_dir == DMA_NONE) 617 if (task->data_dir == DMA_NONE) {
428 return scic_sds_stp_non_data_request_construct(sci_req); 618 scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
619 sci_req->task_context_buffer);
620 return SCI_SUCCESS;
621 }
429 622
430 /* NCQ */ 623 /* NCQ */
431 if (task->ata_task.use_ncq) 624 if (task->ata_task.use_ncq) {
432 return scic_sds_stp_ncq_request_construct(sci_req, len, dir); 625 scic_sds_stp_optimized_request_construct(sci_req,
626 SCU_TASK_TYPE_FPDMAQ_READ,
627 len, dir);
628 return SCI_SUCCESS;
629 }
433 630
434 /* DMA */ 631 /* DMA */
435 if (task->ata_task.dma_xfer) 632 if (task->ata_task.dma_xfer) {
436 return scic_sds_stp_udma_request_construct(sci_req, len, dir); 633 scic_sds_stp_optimized_request_construct(sci_req,
437 else /* PIO */ 634 SCU_TASK_TYPE_DMA_IN,
635 len, dir);
636 return SCI_SUCCESS;
637 } else /* PIO */
438 return scic_sds_stp_pio_request_construct(sci_req, copy); 638 return scic_sds_stp_pio_request_construct(sci_req, copy);
439 639
440 return status; 640 return status;
@@ -453,9 +653,8 @@ static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_reque
453 653
454 scic_sds_io_request_build_ssp_command_iu(sci_req); 654 scic_sds_io_request_build_ssp_command_iu(sci_req);
455 655
456 sci_base_state_machine_change_state( 656 sci_base_state_machine_change_state(&sci_req->state_machine,
457 &sci_req->state_machine, 657 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
458 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
459 658
460 return SCI_SUCCESS; 659 return SCI_SUCCESS;
461} 660}
@@ -470,12 +669,11 @@ enum sci_status scic_task_request_construct_ssp(
470 scic_sds_task_request_build_ssp_task_iu(sci_req); 669 scic_sds_task_request_build_ssp_task_iu(sci_req);
471 670
472 sci_base_state_machine_change_state(&sci_req->state_machine, 671 sci_base_state_machine_change_state(&sci_req->state_machine,
473 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 672 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
474 673
475 return SCI_SUCCESS; 674 return SCI_SUCCESS;
476} 675}
477 676
478
479static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req) 677static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
480{ 678{
481 enum sci_status status; 679 enum sci_status status;
@@ -496,12 +694,11 @@ static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_requ
496 694
497 if (status == SCI_SUCCESS) 695 if (status == SCI_SUCCESS)
498 sci_base_state_machine_change_state(&sci_req->state_machine, 696 sci_base_state_machine_change_state(&sci_req->state_machine,
499 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 697 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
500 698
501 return status; 699 return status;
502} 700}
503 701
504
505enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) 702enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
506{ 703{
507 enum sci_status status = SCI_SUCCESS; 704 enum sci_status status = SCI_SUCCESS;
@@ -513,7 +710,8 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re
513 710
514 if (tmf->tmf_code == isci_tmf_sata_srst_high || 711 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
515 tmf->tmf_code == isci_tmf_sata_srst_low) { 712 tmf->tmf_code == isci_tmf_sata_srst_low) {
516 status = scic_sds_stp_soft_reset_request_construct(sci_req); 713 scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
714 sci_req->task_context_buffer);
517 } else { 715 } else {
518 dev_err(scic_to_dev(sci_req->owning_controller), 716 dev_err(scic_to_dev(sci_req->owning_controller),
519 "%s: Request 0x%p received un-handled SAT " 717 "%s: Request 0x%p received un-handled SAT "
@@ -524,10 +722,10 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re
524 } 722 }
525 } 723 }
526 724
527 if (status == SCI_SUCCESS) 725 if (status != SCI_SUCCESS)
528 sci_base_state_machine_change_state( 726 return status;
529 &sci_req->state_machine, 727 sci_base_state_machine_change_state(&sci_req->state_machine,
530 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 728 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
531 729
532 return status; 730 return status;
533} 731}
@@ -724,7 +922,7 @@ static enum sci_status scic_sds_request_constructed_state_start_handler(
724 922
725 /* Everything is good go ahead and change state */ 923 /* Everything is good go ahead and change state */
726 sci_base_state_machine_change_state(&request->state_machine, 924 sci_base_state_machine_change_state(&request->state_machine,
727 SCI_BASE_REQUEST_STATE_STARTED); 925 SCI_BASE_REQUEST_STATE_STARTED);
728 926
729 return SCI_SUCCESS; 927 return SCI_SUCCESS;
730 } 928 }
@@ -749,29 +947,14 @@ static enum sci_status scic_sds_request_constructed_state_abort_handler(
749 SCI_FAILURE_IO_TERMINATED); 947 SCI_FAILURE_IO_TERMINATED);
750 948
751 sci_base_state_machine_change_state(&request->state_machine, 949 sci_base_state_machine_change_state(&request->state_machine,
752 SCI_BASE_REQUEST_STATE_COMPLETED); 950 SCI_BASE_REQUEST_STATE_COMPLETED);
753 return SCI_SUCCESS; 951 return SCI_SUCCESS;
754} 952}
755 953
756/* 954static enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req)
757 * *****************************************************************************
758 * * STARTED STATE HANDLERS
759 * ***************************************************************************** */
760
761/*
762 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
763 * object receives a scic_sds_request_terminate() request. Since the request
764 * has been posted to the hardware the io request state is changed to the
765 * aborting state. enum sci_status SCI_SUCCESS
766 */
767enum sci_status scic_sds_request_started_state_abort_handler(
768 struct scic_sds_request *request)
769{ 955{
770 if (request->has_started_substate_machine) 956 sci_base_state_machine_change_state(&sci_req->state_machine,
771 sci_base_state_machine_stop(&request->started_substate_machine); 957 SCI_BASE_REQUEST_STATE_ABORTING);
772
773 sci_base_state_machine_change_state(&request->state_machine,
774 SCI_BASE_REQUEST_STATE_ABORTING);
775 return SCI_SUCCESS; 958 return SCI_SUCCESS;
776} 959}
777 960
@@ -943,19 +1126,15 @@ scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sc
943 */ 1126 */
944 1127
945 /* In all cases we will treat this as the completion of the IO req. */ 1128 /* In all cases we will treat this as the completion of the IO req. */
946 sci_base_state_machine_change_state( 1129 sci_base_state_machine_change_state(&sci_req->state_machine,
947 &sci_req->state_machine, 1130 SCI_BASE_REQUEST_STATE_COMPLETED);
948 SCI_BASE_REQUEST_STATE_COMPLETED);
949 return SCI_SUCCESS; 1131 return SCI_SUCCESS;
950} 1132}
951 1133
952enum sci_status 1134enum sci_status
953scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code) 1135scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
954{ 1136{
955 if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED && 1137 if (request->state_handlers->tc_completion_handler)
956 request->has_started_substate_machine == false)
957 return scic_sds_request_started_state_tc_completion_handler(request, completion_code);
958 else if (request->state_handlers->tc_completion_handler)
959 return request->state_handlers->tc_completion_handler(request, completion_code); 1138 return request->state_handlers->tc_completion_handler(request, completion_code);
960 1139
961 dev_warn(scic_to_dev(request->owning_controller), 1140 dev_warn(scic_to_dev(request->owning_controller),
@@ -1064,7 +1243,7 @@ static enum sci_status scic_sds_request_completed_state_complete_handler(
1064 } 1243 }
1065 1244
1066 sci_base_state_machine_change_state(&request->state_machine, 1245 sci_base_state_machine_change_state(&request->state_machine,
1067 SCI_BASE_REQUEST_STATE_FINAL); 1246 SCI_BASE_REQUEST_STATE_FINAL);
1068 return SCI_SUCCESS; 1247 return SCI_SUCCESS;
1069} 1248}
1070 1249
@@ -1084,7 +1263,7 @@ static enum sci_status scic_sds_request_aborting_state_abort_handler(
1084 struct scic_sds_request *request) 1263 struct scic_sds_request *request)
1085{ 1264{
1086 sci_base_state_machine_change_state(&request->state_machine, 1265 sci_base_state_machine_change_state(&request->state_machine,
1087 SCI_BASE_REQUEST_STATE_COMPLETED); 1266 SCI_BASE_REQUEST_STATE_COMPLETED);
1088 return SCI_SUCCESS; 1267 return SCI_SUCCESS;
1089} 1268}
1090 1269
@@ -1107,7 +1286,7 @@ static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
1107 ); 1286 );
1108 1287
1109 sci_base_state_machine_change_state(&sci_req->state_machine, 1288 sci_base_state_machine_change_state(&sci_req->state_machine,
1110 SCI_BASE_REQUEST_STATE_COMPLETED); 1289 SCI_BASE_REQUEST_STATE_COMPLETED);
1111 break; 1290 break;
1112 1291
1113 default: 1292 default:
@@ -1161,7 +1340,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi
1161 SCI_SUCCESS); 1340 SCI_SUCCESS);
1162 1341
1163 sci_base_state_machine_change_state(&sci_req->state_machine, 1342 sci_base_state_machine_change_state(&sci_req->state_machine,
1164 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); 1343 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
1165 break; 1344 break;
1166 1345
1167 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1346 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
@@ -1178,7 +1357,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi
1178 completion_code); 1357 completion_code);
1179 1358
1180 sci_base_state_machine_change_state(&sci_req->state_machine, 1359 sci_base_state_machine_change_state(&sci_req->state_machine,
1181 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); 1360 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
1182 break; 1361 break;
1183 1362
1184 default: 1363 default:
@@ -1192,7 +1371,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi
1192 ); 1371 );
1193 1372
1194 sci_base_state_machine_change_state(&sci_req->state_machine, 1373 sci_base_state_machine_change_state(&sci_req->state_machine,
1195 SCI_BASE_REQUEST_STATE_COMPLETED); 1374 SCI_BASE_REQUEST_STATE_COMPLETED);
1196 break; 1375 break;
1197 } 1376 }
1198 1377
@@ -1215,9 +1394,9 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler
1215 struct scic_sds_request *request) 1394 struct scic_sds_request *request)
1216{ 1395{
1217 sci_base_state_machine_change_state(&request->state_machine, 1396 sci_base_state_machine_change_state(&request->state_machine,
1218 SCI_BASE_REQUEST_STATE_ABORTING); 1397 SCI_BASE_REQUEST_STATE_ABORTING);
1219 sci_base_state_machine_change_state(&request->state_machine, 1398 sci_base_state_machine_change_state(&request->state_machine,
1220 SCI_BASE_REQUEST_STATE_COMPLETED); 1399 SCI_BASE_REQUEST_STATE_COMPLETED);
1221 return SCI_SUCCESS; 1400 return SCI_SUCCESS;
1222} 1401}
1223 1402
@@ -1243,7 +1422,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler
1243 scic_sds_io_request_copy_response(request); 1422 scic_sds_io_request_copy_response(request);
1244 1423
1245 sci_base_state_machine_change_state(&request->state_machine, 1424 sci_base_state_machine_change_state(&request->state_machine,
1246 SCI_BASE_REQUEST_STATE_COMPLETED); 1425 SCI_BASE_REQUEST_STATE_COMPLETED);
1247 scic_sds_controller_release_frame(request->owning_controller, 1426 scic_sds_controller_release_frame(request->owning_controller,
1248 frame_index); 1427 frame_index);
1249 return SCI_SUCCESS; 1428 return SCI_SUCCESS;
@@ -1270,13 +1449,11 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler
1270 /* 1449 /*
1271 * In the AWAIT RESPONSE state, any TC completion is unexpected. 1450 * In the AWAIT RESPONSE state, any TC completion is unexpected.
1272 * but if the TC has success status, we complete the IO anyway. */ 1451 * but if the TC has success status, we complete the IO anyway. */
1273 scic_sds_request_set_status( 1452 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1274 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS 1453 SCI_SUCCESS);
1275 );
1276 1454
1277 sci_base_state_machine_change_state( 1455 sci_base_state_machine_change_state(&sci_req->state_machine,
1278 &sci_req->state_machine, 1456 SCI_BASE_REQUEST_STATE_COMPLETED);
1279 SCI_BASE_REQUEST_STATE_COMPLETED);
1280 break; 1457 break;
1281 1458
1282 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1459 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
@@ -1288,13 +1465,11 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler
1288 * is not able to send smp response within 2 ms. This causes our hardware 1465 * is not able to send smp response within 2 ms. This causes our hardware
1289 * break the connection and set TC completion with one of these SMP_XXX_XX_ERR 1466 * break the connection and set TC completion with one of these SMP_XXX_XX_ERR
1290 * status. For these type of error, we ask scic user to retry the request. */ 1467 * status. For these type of error, we ask scic user to retry the request. */
1291 scic_sds_request_set_status( 1468 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
1292 sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED 1469 SCI_FAILURE_RETRY_REQUIRED);
1293 );
1294 1470
1295 sci_base_state_machine_change_state( 1471 sci_base_state_machine_change_state(&sci_req->state_machine,
1296 &sci_req->state_machine, 1472 SCI_BASE_REQUEST_STATE_COMPLETED);
1297 SCI_BASE_REQUEST_STATE_COMPLETED);
1298 break; 1473 break;
1299 1474
1300 default: 1475 default:
@@ -1307,9 +1482,8 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler
1307 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR 1482 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1308 ); 1483 );
1309 1484
1310 sci_base_state_machine_change_state( 1485 sci_base_state_machine_change_state(&sci_req->state_machine,
1311 &sci_req->state_machine, 1486 SCI_BASE_REQUEST_STATE_COMPLETED);
1312 SCI_BASE_REQUEST_STATE_COMPLETED);
1313 break; 1487 break;
1314 } 1488 }
1315 1489
@@ -1365,7 +1539,7 @@ scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_r
1365 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); 1539 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
1366 1540
1367 sci_base_state_machine_change_state(&sci_req->state_machine, 1541 sci_base_state_machine_change_state(&sci_req->state_machine,
1368 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); 1542 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION);
1369 } else { 1543 } else {
1370 /* This was not a response frame why did it get forwarded? */ 1544 /* This was not a response frame why did it get forwarded? */
1371 dev_err(scic_to_dev(sci_req->owning_controller), 1545 dev_err(scic_to_dev(sci_req->owning_controller),
@@ -1381,9 +1555,8 @@ scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_r
1381 SCU_TASK_DONE_SMP_FRM_TYPE_ERR, 1555 SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1382 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1556 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1383 1557
1384 sci_base_state_machine_change_state( 1558 sci_base_state_machine_change_state(&sci_req->state_machine,
1385 &sci_req->state_machine, 1559 SCI_BASE_REQUEST_STATE_COMPLETED);
1386 SCI_BASE_REQUEST_STATE_COMPLETED);
1387 } 1560 }
1388 1561
1389 scic_sds_controller_release_frame(sci_req->owning_controller, 1562 scic_sds_controller_release_frame(sci_req->owning_controller,
@@ -1411,14 +1584,111 @@ static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_ha
1411{ 1584{
1412 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1585 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1413 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1586 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1587 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1588 SCI_SUCCESS);
1589
1590 sci_base_state_machine_change_state(&sci_req->state_machine,
1591 SCI_BASE_REQUEST_STATE_COMPLETED);
1592 break;
1593
1594 default:
1595 /*
1596 * All other completion status cause the IO to be complete. If a NAK
1597 * was received, then it is up to the user to retry the request. */
1414 scic_sds_request_set_status( 1598 scic_sds_request_set_status(
1415 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS 1599 sci_req,
1600 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1601 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1416 ); 1602 );
1417 1603
1418 sci_base_state_machine_change_state( 1604 sci_base_state_machine_change_state(
1419 &sci_req->state_machine, 1605 &sci_req->state_machine,
1420 SCI_BASE_REQUEST_STATE_COMPLETED); 1606 SCI_BASE_REQUEST_STATE_COMPLETED);
1421 break; 1607 break;
1608 }
1609
1610 return SCI_SUCCESS;
1611}
1612
1613void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
1614 u16 ncq_tag)
1615{
1616 /**
1617 * @note This could be made to return an error to the user if the user
1618 * attempts to set the NCQ tag in the wrong state.
1619 */
1620 req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
1621}
1622
1623/**
1624 *
1625 * @sci_req:
1626 *
1627 * Get the next SGL element from the request. - Check on which SGL element pair
1628 * we are working - if working on SLG pair element A - advance to element B -
1629 * else - check to see if there are more SGL element pairs for this IO request
1630 * - if there are more SGL element pairs - advance to the next pair and return
1631 * element A struct scu_sgl_element*
1632 */
1633static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
1634{
1635 struct scu_sgl_element *current_sgl;
1636 struct scic_sds_request *sci_req = to_sci_req(stp_req);
1637 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
1638
1639 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1640 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
1641 pio_sgl->sgl_pair->B.address_upper == 0) {
1642 current_sgl = NULL;
1643 } else {
1644 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
1645 current_sgl = &pio_sgl->sgl_pair->B;
1646 }
1647 } else {
1648 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
1649 pio_sgl->sgl_pair->next_pair_upper == 0) {
1650 current_sgl = NULL;
1651 } else {
1652 u64 phys_addr;
1653
1654 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
1655 phys_addr <<= 32;
1656 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
1657
1658 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
1659 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
1660 current_sgl = &pio_sgl->sgl_pair->A;
1661 }
1662 }
1663
1664 return current_sgl;
1665}
1666
1667/**
1668 *
1669 * @sci_req:
1670 * @completion_code:
1671 *
1672 * This method processes a TC completion. The expected TC completion is for
1673 * the transmission of the H2D register FIS containing the SATA/STP non-data
1674 * request. This method always successfully processes the TC completion.
1675 * SCI_SUCCESS This value is always returned.
1676 */
1677static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
1678 struct scic_sds_request *sci_req,
1679 u32 completion_code)
1680{
1681 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1682 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1683 scic_sds_request_set_status(
1684 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1685 );
1686
1687 sci_base_state_machine_change_state(
1688 &sci_req->state_machine,
1689 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
1690 );
1691 break;
1422 1692
1423 default: 1693 default:
1424 /* 1694 /*
@@ -1431,14 +1701,861 @@ static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_ha
1431 ); 1701 );
1432 1702
1433 sci_base_state_machine_change_state( 1703 sci_base_state_machine_change_state(
1704 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
1705 break;
1706 }
1707
1708 return SCI_SUCCESS;
1709}
1710
1711/**
1712 *
1713 * @request: This parameter specifies the request for which a frame has been
1714 * received.
1715 * @frame_index: This parameter specifies the index of the frame that has been
1716 * received.
1717 *
1718 * This method processes frames received from the target while waiting for a
1719 * device to host register FIS. If a non-register FIS is received during this
1720 * time, it is treated as a protocol violation from an IO perspective. Indicate
1721 * if the received frame was processed successfully.
1722 */
1723static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
1724 struct scic_sds_request *sci_req,
1725 u32 frame_index)
1726{
1727 enum sci_status status;
1728 struct dev_to_host_fis *frame_header;
1729 u32 *frame_buffer;
1730 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1731 struct scic_sds_controller *scic = sci_req->owning_controller;
1732
1733 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1734 frame_index,
1735 (void **)&frame_header);
1736
1737 if (status != SCI_SUCCESS) {
1738 dev_err(scic_to_dev(sci_req->owning_controller),
1739 "%s: SCIC IO Request 0x%p could not get frame header "
1740 "for frame index %d, status %x\n",
1741 __func__, stp_req, frame_index, status);
1742
1743 return status;
1744 }
1745
1746 switch (frame_header->fis_type) {
1747 case FIS_REGD2H:
1748 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1749 frame_index,
1750 (void **)&frame_buffer);
1751
1752 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1753 frame_header,
1754 frame_buffer);
1755
1756 /* The command has completed with error */
1757 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
1758 SCI_FAILURE_IO_RESPONSE_VALID);
1759 break;
1760
1761 default:
1762 dev_warn(scic_to_dev(scic),
1763 "%s: IO Request:0x%p Frame Id:%d protocol "
1764 "violation occurred\n", __func__, stp_req,
1765 frame_index);
1766
1767 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1768 SCI_FAILURE_PROTOCOL_VIOLATION);
1769 break;
1770 }
1771
1772 sci_base_state_machine_change_state(&sci_req->state_machine,
1773 SCI_BASE_REQUEST_STATE_COMPLETED);
1774
1775 /* Frame has been decoded return it to the controller */
1776 scic_sds_controller_release_frame(scic, frame_index);
1777
1778 return status;
1779}
1780
1781#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
1782
/*
 * scic_sds_stp_request_pio_data_out_trasmit_data_frame() - transmit a DATA
 * FIS from (current sgl + offset) for the given length.
 * @sci_req: the PIO data-out request being serviced.
 * @length: number of bytes to transmit from the currently active SGL
 *	element; the caller bounds this by what remains in the element.
 *
 * The current sgl and offset are already stored in the IO request.  The
 * request's task context is recycled in place and resubmitted so the SCU
 * emits a data frame rather than a command.
 * (Note: the "trasmit" spelling is part of the established name; renaming
 * would touch all callers, so it is preserved.)
 */
static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
	struct scic_sds_request *sci_req,
	u32 length)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scu_task_context *task_context;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it for sending out a DATA FIS
	 * containing the data from current_sgl+offset for the input length
	 */
	task_context = scic_sds_controller_get_task_context_buffer(scic,
								   sci_req->io_tag);

	/* Pick whichever element of the current pair is active. */
	if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
	else
		current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;

	/* update the TC: point it at the active element and mark the frame
	 * as a DATA FIS */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return scic_controller_continue_io(sci_req);
}
1815
1816static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
1817{
1818
1819 struct scu_sgl_element *current_sgl;
1820 u32 sgl_offset;
1821 u32 remaining_bytes_in_current_sgl = 0;
1822 enum sci_status status = SCI_SUCCESS;
1823 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1824
1825 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
1826
1827 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1828 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
1829 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
1830 } else {
1831 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
1832 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
1833 }
1834
1835
1836 if (stp_req->type.pio.pio_transfer_bytes > 0) {
1837 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
1838 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
1839 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
1840 if (status == SCI_SUCCESS) {
1841 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
1842
1843 /* update the current sgl, sgl_offset and save for future */
1844 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
1845 sgl_offset = 0;
1846 }
1847 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
1848 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
1849 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
1850
1851 if (status == SCI_SUCCESS) {
1852 /* Sgl offset will be adjusted and saved for future */
1853 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
1854 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
1855 stp_req->type.pio.pio_transfer_bytes = 0;
1856 }
1857 }
1858 }
1859
1860 if (status == SCI_SUCCESS) {
1861 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
1862 }
1863
1864 return status;
1865}
1866
/*
 * scic_sds_stp_request_pio_data_in_copy_data_buffer() - copy received PIO
 * payload into the IO request's data region.
 * @stp_req: the request that is used for the SGL processing.
 * @data_buf: the buffer of data to be copied.
 * @len: the length of the data transfer.
 *
 * Copies the data from the buffer for the length specified to the IO
 * request SGL specified data region.  Always returns SCI_SUCCESS.
 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
						  u8 *data_buf, u32 len)
{
	struct scic_sds_request *sci_req;
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	sci_req = to_sci_req(stp_req);
	ireq = sci_req_to_ireq(sci_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		/* Walk the scatterlist, copying at most one sg entry per
		 * pass.
		 * NOTE(review): the per-entry bound uses sg_dma_len() while
		 * the destination is the CPU mapping (page + sg->offset) --
		 * assumes DMA and CPU lengths agree here; confirm.  Also
		 * assumes len never exceeds the scatterlist's capacity, as
		 * sg is advanced without a NULL check. */
		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		/* No scatterlist: task->scatter holds the destination
		 * virtual address directly. */
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
1915
1916/**
1917 *
1918 * @sci_req: The PIO DATA IN request that is to receive the data.
1919 * @data_buffer: The buffer to copy from.
1920 *
1921 * Copy the data buffer to the io request data region. enum sci_status
1922 */
1923static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1924 struct scic_sds_stp_request *sci_req,
1925 u8 *data_buffer)
1926{
1927 enum sci_status status;
1928
1929 /*
1930 * If there is less than 1K remaining in the transfer request
1931 * copy just the data for the transfer */
1932 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
1933 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1934 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
1935
1936 if (status == SCI_SUCCESS)
1937 sci_req->type.pio.pio_transfer_bytes = 0;
1938 } else {
1939 /* We are transfering the whole frame so copy */
1940 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1941 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1942
1943 if (status == SCI_SUCCESS)
1944 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
1945 }
1946
1947 return status;
1948}
1949
1950/**
1951 *
1952 * @sci_req:
1953 * @completion_code:
1954 *
1955 * enum sci_status
1956 */
1957static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
1958 struct scic_sds_request *sci_req,
1959 u32 completion_code)
1960{
1961 enum sci_status status = SCI_SUCCESS;
1962
1963 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1964 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1965 scic_sds_request_set_status(
1966 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1967 );
1968
1969 sci_base_state_machine_change_state(
1434 &sci_req->state_machine, 1970 &sci_req->state_machine,
1435 SCI_BASE_REQUEST_STATE_COMPLETED); 1971 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1972 );
1973 break;
1974
1975 default:
1976 /*
1977 * All other completion status cause the IO to be complete. If a NAK
1978 * was received, then it is up to the user to retry the request. */
1979 scic_sds_request_set_status(
1980 sci_req,
1981 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1982 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1983 );
1984
1985 sci_base_state_machine_change_state(
1986 &sci_req->state_machine,
1987 SCI_BASE_REQUEST_STATE_COMPLETED
1988 );
1989 break;
1990 }
1991
1992 return status;
1993}
1994
1995static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
1996 u32 frame_index)
1997{
1998 struct scic_sds_controller *scic = sci_req->owning_controller;
1999 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2000 struct isci_request *ireq = sci_req_to_ireq(sci_req);
2001 struct sas_task *task = isci_request_access_task(ireq);
2002 struct dev_to_host_fis *frame_header;
2003 enum sci_status status;
2004 u32 *frame_buffer;
2005
2006 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2007 frame_index,
2008 (void **)&frame_header);
2009
2010 if (status != SCI_SUCCESS) {
2011 dev_err(scic_to_dev(scic),
2012 "%s: SCIC IO Request 0x%p could not get frame header "
2013 "for frame index %d, status %x\n",
2014 __func__, stp_req, frame_index, status);
2015 return status;
2016 }
2017
2018 switch (frame_header->fis_type) {
2019 case FIS_PIO_SETUP:
2020 /* Get from the frame buffer the PIO Setup Data */
2021 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2022 frame_index,
2023 (void **)&frame_buffer);
2024
2025 /* Get the data from the PIO Setup The SCU Hardware returns
2026 * first word in the frame_header and the rest of the data is in
2027 * the frame buffer so we need to back up one dword
2028 */
2029
2030 /* transfer_count: first 16bits in the 4th dword */
2031 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
2032
2033 /* ending_status: 4th byte in the 3rd dword */
2034 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
2035
2036 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2037 frame_header,
2038 frame_buffer);
2039
2040 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
2041
2042 /* The next state is dependent on whether the
2043 * request was PIO Data-in or Data out
2044 */
2045 if (task->data_dir == DMA_FROM_DEVICE) {
2046 sci_base_state_machine_change_state(&sci_req->state_machine,
2047 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
2048 } else if (task->data_dir == DMA_TO_DEVICE) {
2049 /* Transmit data */
2050 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
2051 if (status != SCI_SUCCESS)
2052 break;
2053 sci_base_state_machine_change_state(&sci_req->state_machine,
2054 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
2055 }
2056 break;
2057 case FIS_SETDEVBITS:
2058 sci_base_state_machine_change_state(&sci_req->state_machine,
2059 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
2060 break;
2061 case FIS_REGD2H:
2062 if (frame_header->status & ATA_BUSY) {
2063 /* Now why is the drive sending a D2H Register FIS when
2064 * it is still busy? Do nothing since we are still in
2065 * the right state.
2066 */
2067 dev_dbg(scic_to_dev(scic),
2068 "%s: SCIC PIO Request 0x%p received "
2069 "D2H Register FIS with BSY status "
2070 "0x%x\n", __func__, stp_req,
2071 frame_header->status);
2072 break;
2073 }
2074
2075 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2076 frame_index,
2077 (void **)&frame_buffer);
2078
2079 scic_sds_controller_copy_sata_response(&sci_req->stp.req,
2080 frame_header,
2081 frame_buffer);
2082
2083 scic_sds_request_set_status(sci_req,
2084 SCU_TASK_DONE_CHECK_RESPONSE,
2085 SCI_FAILURE_IO_RESPONSE_VALID);
2086
2087 sci_base_state_machine_change_state(&sci_req->state_machine,
2088 SCI_BASE_REQUEST_STATE_COMPLETED);
2089 break;
2090 default:
2091 /* FIXME: what do we do here? */
2092 break;
2093 }
2094
2095 /* Frame is decoded return it to the controller */
2096 scic_sds_controller_release_frame(scic, frame_index);
2097
2098 return status;
2099}
2100
/*
 * scic_sds_stp_request_pio_data_in_await_data_frame_handler() - process a
 * frame received while a PIO data-in request waits for a DATA FIS.
 * @sci_req: the request for which a frame has been received.
 * @frame_index: index of the received unsolicited frame.
 *
 * Copies the DATA FIS payload into the request's buffer; when the PIO
 * transfer count reaches zero the request either completes (ending status
 * has BSY clear) or re-arms to wait for the next PIO SETUP / D2H frame.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
										 u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	struct sata_fis_data *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	if (frame_header->fis_type == FIS_DATA) {
		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
			/* No SGL to copy into: park the frame for later
			 * retrieval via saved_rx_frame_index and stop
			 * counting.  The frame is deliberately NOT released
			 * here. */
			sci_req->saved_rx_frame_index = frame_index;
			stp_req->type.pio.pio_transfer_bytes = 0;
		} else {
			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&frame_buffer);

			status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
									    (u8 *)frame_buffer);

			/* Frame is decoded return it to the controller */
			scic_sds_controller_release_frame(scic, frame_index);
		}

		/* Check for the end of the transfer, are there more
		 * bytes remaining for this data transfer
		 */
		if (status != SCI_SUCCESS ||
		    stp_req->type.pio.pio_transfer_bytes != 0)
			return status;

		if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
			/* BSY clear in the latched ending status: the device
			 * is done -- complete with the response available
			 * for inspection. */
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);

			sci_base_state_machine_change_state(&sci_req->state_machine,
							    SCI_BASE_REQUEST_STATE_COMPLETED);
		} else {
			/* More PIO phases follow: wait for the next
			 * setup/D2H frame. */
			sci_base_state_machine_change_state(&sci_req->state_machine,
							    SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
		}
	} else {
		dev_err(scic_to_dev(scic),
			"%s: SCIC PIO Request 0x%p received frame %d "
			"with fis type 0x%02x when expecting a data "
			"fis.\n", __func__, stp_req, frame_index,
			frame_header->fis_type);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);

		/* Frame is decoded return it to the controller */
		scic_sds_controller_release_frame(scic, frame_index);
	}

	return status;
}
2176
2177
/*
 * scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler()
 * - handle the TC completion after a PIO data-out DATA FIS was sent.
 * @sci_req: the PIO data-out request.
 * @completion_code: the SCU TC completion code.
 *
 * On a good completion either transmits the next chunk or, when the
 * transfer count is exhausted, re-arms the request to wait for the next
 * PIO SETUP / D2H frame.  Any other completion code terminates the IO.
 */
static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->type.pio.pio_transfer_bytes != 0) {
			status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
			if (status == SCI_SUCCESS) {
				if (stp_req->type.pio.pio_transfer_bytes == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->type.pio.pio_transfer_bytes == 0) {
			/*
			 * this will happen if the all data is written at the
			 * first time after the pio setup fis is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
			 * and wait for PIO_SETUP fis / or D2H REg fis. */
			sci_base_state_machine_change_state(
				&sci_req->state_machine,
				SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
			);
		}
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
		);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED
		);
		break;
	}

	return status;
}
2242
2243/**
2244 *
2245 * @request: This is the request which is receiving the event.
2246 * @event_code: This is the event code that the request on which the request is
2247 * expected to take action.
2248 *
2249 * This method will handle any link layer events while waiting for the data
2250 * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
2251 */
2252static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
2253 struct scic_sds_request *request,
2254 u32 event_code)
2255{
2256 enum sci_status status;
2257
2258 switch (scu_get_event_specifier(event_code)) {
2259 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
2260 /*
2261 * We are waiting for data and the SCU has R_ERR the data frame.
2262 * Go back to waiting for the D2H Register FIS */
2263 sci_base_state_machine_change_state(
2264 &request->state_machine,
2265 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
2266 );
2267
2268 status = SCI_SUCCESS;
2269 break;
2270
2271 default:
2272 dev_err(scic_to_dev(request->owning_controller),
2273 "%s: SCIC PIO Request 0x%p received unexpected "
2274 "event 0x%08x\n",
2275 __func__, request, event_code);
2276
2277 /* / @todo Should we fail the PIO request when we get an unexpected event? */
2278 status = SCI_FAILURE;
2279 break;
2280 }
2281
2282 return status;
2283}
2284
/*
 * scic_sds_stp_request_udma_complete_request() - record the final status
 * and move the request to the COMPLETED state.
 * @request: the UDMA request being completed.
 * @scu_status: hardware (SCU) completion status to record.
 * @sci_status: SCI-level status to report for the request.
 */
static void scic_sds_stp_request_udma_complete_request(
	struct scic_sds_request *request,
	u32 scu_status,
	enum sci_status sci_status)
{
	scic_sds_request_set_status(request, scu_status, sci_status);
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
}
2294
2295static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
2296 u32 frame_index)
2297{
2298 struct scic_sds_controller *scic = sci_req->owning_controller;
2299 struct dev_to_host_fis *frame_header;
2300 enum sci_status status;
2301 u32 *frame_buffer;
2302
2303 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2304 frame_index,
2305 (void **)&frame_header);
2306
2307 if ((status == SCI_SUCCESS) &&
2308 (frame_header->fis_type == FIS_REGD2H)) {
2309 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2310 frame_index,
2311 (void **)&frame_buffer);
2312
2313 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2314 frame_header,
2315 frame_buffer);
2316 }
2317
2318 scic_sds_controller_release_frame(scic, frame_index);
2319
2320 return status;
2321}
2322
/*
 * scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler() -
 * handle the TC completion for a UDMA STP request.
 * @sci_req: the UDMA request.
 * @completion_code: the SCU TC completion code.
 *
 * A good completion finishes the request.  UNEXP_FIS/REG_ERR may race with
 * an already-received D2H register FIS: if one is latched in stp.rsp the
 * request is completed with a check-response status, otherwise the request
 * transitions to wait for the D2H FIS.  Link-level errors suspend the
 * remote device before the request is failed.
 */
static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_TASK_DONE_GOOD,
							   SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/*
		 * We must check the response buffer to see if the D2H Register FIS was
		 * received before we got the TC completion. */
		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
			scic_sds_remote_device_suspend(sci_req->target_device,
				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));

			scic_sds_stp_request_udma_complete_request(sci_req,
								   SCU_TASK_DONE_CHECK_RESPONSE,
								   SCI_FAILURE_IO_RESPONSE_VALID);
		} else {
			/*
			 * If we have an error completion status for the TC then we can expect a
			 * D2H register FIS from the device so we must change state to wait for it */
			sci_base_state_machine_change_state(&sci_req->state_machine,
				SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
		}
		break;

	/*
	 * / @todo Check to see if any of these completion status need to wait for
	 * / the device to host register fis. */
	/* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
		scic_sds_remote_device_suspend(sci_req->target_device,
			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
		/* Fall through to the default case */
	default:
		/* All other completion status cause the IO to be complete. */
		scic_sds_stp_request_udma_complete_request(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	return status;
}
2378
2379static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
2380 struct scic_sds_request *sci_req,
2381 u32 frame_index)
2382{
2383 enum sci_status status;
2384
2385 /* Use the general frame handler to copy the resposne data */
2386 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
2387
2388 if (status != SCI_SUCCESS)
2389 return status;
2390
2391 scic_sds_stp_request_udma_complete_request(sci_req,
2392 SCU_TASK_DONE_CHECK_RESPONSE,
2393 SCI_FAILURE_IO_RESPONSE_VALID);
2394
2395 return status;
2396}
2397
/*
 * scic_sds_stp_udma_request_construct() - construct a UDMA STP request.
 * @sci_req: the request to construct (currently unused).
 * @len: transfer length in bytes (currently unused).
 * @dir: DMA data direction (currently unused).
 *
 * Currently a no-op stub that always reports success.
 * NOTE(review): all parameters are ignored -- confirm callers do not rely
 * on any UDMA-specific construction happening here.
 */
enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
						    u32 len,
						    enum dma_data_direction dir)
{
	return SCI_SUCCESS;
}
2404
2405/**
2406 *
2407 * @sci_req:
2408 * @completion_code:
2409 *
2410 * This method processes a TC completion. The expected TC completion is for
2411 * the transmission of the H2D register FIS containing the SATA/STP non-data
2412 * request. This method always successfully processes the TC completion.
2413 * SCI_SUCCESS This value is always returned.
2414 */
2415static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
2416 struct scic_sds_request *sci_req,
2417 u32 completion_code)
2418{
2419 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2420 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2421 scic_sds_request_set_status(
2422 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
2423 );
2424
2425 sci_base_state_machine_change_state(
2426 &sci_req->state_machine,
2427 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
2428 );
2429 break;
2430
2431 default:
2432 /*
2433 * All other completion status cause the IO to be complete. If a NAK
2434 * was received, then it is up to the user to retry the request. */
2435 scic_sds_request_set_status(
2436 sci_req,
2437 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2438 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2439 );
2440
2441 sci_base_state_machine_change_state(
2442 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
2443 break;
2444 }
2445
2446 return SCI_SUCCESS;
2447}
2448
2449/**
2450 *
2451 * @sci_req:
2452 * @completion_code:
2453 *
2454 * This method processes a TC completion. The expected TC completion is for
2455 * the transmission of the H2D register FIS containing the SATA/STP non-data
2456 * request. This method always successfully processes the TC completion.
2457 * SCI_SUCCESS This value is always returned.
2458 */
2459static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
2460 struct scic_sds_request *sci_req,
2461 u32 completion_code)
2462{
2463 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2464 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2465 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2466 SCI_SUCCESS);
2467
2468 sci_base_state_machine_change_state(&sci_req->state_machine,
2469 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE);
2470 break;
2471
2472 default:
2473 /*
2474 * All other completion status cause the IO to be complete. If a NAK
2475 * was received, then it is up to the user to retry the request. */
2476 scic_sds_request_set_status(
2477 sci_req,
2478 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2479 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2480 );
2481
2482 sci_base_state_machine_change_state(&sci_req->state_machine,
2483 SCI_BASE_REQUEST_STATE_COMPLETED);
1436 break; 2484 break;
1437 } 2485 }
1438 2486
1439 return SCI_SUCCESS; 2487 return SCI_SUCCESS;
1440} 2488}
1441 2489
/*
 * scic_sds_stp_request_soft_reset_await_d2h_frame_handler() - process the
 * frame received while a soft-reset request waits for the D2H register FIS.
 * @sci_req: the request for which a frame has been received.
 * @frame_index: index of the received frame.
 *
 * A register D2H FIS is copied into the response buffer and the request is
 * completed with a check-response status; any other FIS type is treated as
 * a protocol violation from an IO perspective.  Either way the request
 * moves to COMPLETED and the frame is returned to the controller.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);
	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			 "violation occurred\n", __func__, stp_req,
			 frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
					    SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
2558
1442static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { 2559static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
1443 [SCI_BASE_REQUEST_STATE_INITIAL] = { }, 2560 [SCI_BASE_REQUEST_STATE_INITIAL] = { },
1444 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { 2561 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
@@ -1467,6 +2584,52 @@ static const struct scic_sds_io_request_state_handler scic_sds_request_state_han
1467 .abort_handler = scic_sds_request_started_state_abort_handler, 2584 .abort_handler = scic_sds_request_started_state_abort_handler,
1468 .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler, 2585 .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler,
1469 }, 2586 },
2587 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
2588 .abort_handler = scic_sds_request_started_state_abort_handler,
2589 .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
2590 .frame_handler = scic_sds_stp_request_udma_general_frame_handler,
2591 },
2592 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
2593 .abort_handler = scic_sds_request_started_state_abort_handler,
2594 .frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
2595 },
2596 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
2597 .abort_handler = scic_sds_request_started_state_abort_handler,
2598 .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
2599 },
2600 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
2601 .abort_handler = scic_sds_request_started_state_abort_handler,
2602 .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler,
2603 },
2604 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
2605 .abort_handler = scic_sds_request_started_state_abort_handler,
2606 .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
2607 },
2608 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
2609 .abort_handler = scic_sds_request_started_state_abort_handler,
2610 .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
2611 },
2612 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
2613 .abort_handler = scic_sds_request_started_state_abort_handler,
2614 .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
2615 .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
2616 },
2617 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
2618 .abort_handler = scic_sds_request_started_state_abort_handler,
2619 .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
2620 },
2621 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
2622 .abort_handler = scic_sds_request_started_state_abort_handler,
2623 .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
2624 },
2625 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
2626 .abort_handler = scic_sds_request_started_state_abort_handler,
2627 .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
2628 },
2629 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
2630 .abort_handler = scic_sds_request_started_state_abort_handler,
2631 .frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
2632 },
1470 [SCI_BASE_REQUEST_STATE_COMPLETED] = { 2633 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
1471 .complete_handler = scic_sds_request_completed_state_complete_handler, 2634 .complete_handler = scic_sds_request_completed_state_complete_handler,
1472 }, 2635 },
@@ -2210,15 +3373,6 @@ static void scic_sds_request_constructed_state_enter(void *object)
2210 ); 3373 );
2211} 3374}
2212 3375
2213/**
2214 * scic_sds_request_started_state_enter() -
2215 * @object: This parameter specifies the base object for which the state
2216 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
2217 *
2218 * This method implements the actions taken when entering the
2219 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a
2220 * SCSI Task request we must enter the started substate machine. none
2221 */
2222static void scic_sds_request_started_state_enter(void *object) 3376static void scic_sds_request_started_state_enter(void *object)
2223{ 3377{
2224 struct scic_sds_request *sci_req = object; 3378 struct scic_sds_request *sci_req = object;
@@ -2238,40 +3392,36 @@ static void scic_sds_request_started_state_enter(void *object)
2238 SCI_BASE_REQUEST_STATE_STARTED 3392 SCI_BASE_REQUEST_STATE_STARTED
2239 ); 3393 );
2240 3394
2241 /* Most of the request state machines have a started substate machine so 3395 /* all unaccelerated request types (non ssp or ncq) handled with
2242 * start its execution on the entry to the started state. 3396 * substates
2243 */ 3397 */
2244 if (sci_req->has_started_substate_machine == true)
2245 sci_base_state_machine_start(&sci_req->started_substate_machine);
2246
2247 if (!task && dev->dev_type == SAS_END_DEV) { 3398 if (!task && dev->dev_type == SAS_END_DEV) {
2248 sci_base_state_machine_change_state(sm, 3399 sci_base_state_machine_change_state(sm,
2249 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION); 3400 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
3401 } else if (!task &&
3402 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
3403 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
3404 sci_base_state_machine_change_state(sm,
3405 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
2250 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 3406 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2251 sci_base_state_machine_change_state(sm, 3407 sci_base_state_machine_change_state(sm,
2252 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE); 3408 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE);
3409 } else if (task && sas_protocol_ata(task->task_proto) &&
3410 !task->ata_task.use_ncq) {
3411 u32 state;
3412
3413 if (task->data_dir == DMA_NONE)
3414 state = SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE;
3415 else if (task->ata_task.dma_xfer)
3416 state = SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE;
3417 else /* PIO */
3418 state = SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE;
3419
3420 sci_base_state_machine_change_state(sm, state);
2253 } 3421 }
2254} 3422}
2255 3423
2256/** 3424/**
2257 * scic_sds_request_started_state_exit() -
2258 * @object: This parameter specifies the base object for which the state
2259 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
2260 * object.
2261 *
2262 * This method implements the actions taken when exiting the
2263 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be
2264 * to stop the started substate machine. none
2265 */
2266static void scic_sds_request_started_state_exit(void *object)
2267{
2268 struct scic_sds_request *sci_req = object;
2269
2270 if (sci_req->has_started_substate_machine == true)
2271 sci_base_state_machine_stop(&sci_req->started_substate_machine);
2272}
2273
2274/**
2275 * scic_sds_request_completed_state_enter() - 3425 * scic_sds_request_completed_state_enter() -
2276 * @object: This parameter specifies the base object for which the state 3426 * @object: This parameter specifies the base object for which the state
2277 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST 3427 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
@@ -2392,6 +3542,175 @@ static void scic_sds_smp_request_started_await_tc_completion_substate_enter(void
2392 ); 3542 );
2393} 3543}
2394 3544
3545static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
3546 void *object)
3547{
3548 struct scic_sds_request *sci_req = object;
3549
3550 SET_STATE_HANDLER(
3551 sci_req,
3552 scic_sds_request_state_handler_table,
3553 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
3554 );
3555
3556 scic_sds_remote_device_set_working_request(
3557 sci_req->target_device, sci_req
3558 );
3559}
3560
3561static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
3562{
3563 struct scic_sds_request *sci_req = object;
3564
3565 SET_STATE_HANDLER(
3566 sci_req,
3567 scic_sds_request_state_handler_table,
3568 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
3569 );
3570}
3571
3572
3573
3574static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
3575 void *object)
3576{
3577 struct scic_sds_request *sci_req = object;
3578
3579 SET_STATE_HANDLER(
3580 sci_req,
3581 scic_sds_request_state_handler_table,
3582 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
3583 );
3584
3585 scic_sds_remote_device_set_working_request(
3586 sci_req->target_device, sci_req);
3587}
3588
3589static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
3590{
3591 struct scic_sds_request *sci_req = object;
3592
3593 SET_STATE_HANDLER(
3594 sci_req,
3595 scic_sds_request_state_handler_table,
3596 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
3597 );
3598}
3599
3600static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
3601 void *object)
3602{
3603 struct scic_sds_request *sci_req = object;
3604
3605 SET_STATE_HANDLER(
3606 sci_req,
3607 scic_sds_request_state_handler_table,
3608 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
3609 );
3610}
3611
3612static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
3613 void *object)
3614{
3615 struct scic_sds_request *sci_req = object;
3616
3617 SET_STATE_HANDLER(
3618 sci_req,
3619 scic_sds_request_state_handler_table,
3620 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
3621 );
3622}
3623
3624
3625
3626static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
3627 void *object)
3628{
3629 struct scic_sds_request *sci_req = object;
3630
3631 SET_STATE_HANDLER(
3632 sci_req,
3633 scic_sds_request_state_handler_table,
3634 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
3635 );
3636}
3637
3638/**
3639 *
3640 *
3641 * This state is entered when there is an TC completion failure. The hardware
3642 * received an unexpected condition while processing the IO request and now
3643 * will UF the D2H register FIS to complete the IO.
3644 */
3645static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
3646 void *object)
3647{
3648 struct scic_sds_request *sci_req = object;
3649
3650 SET_STATE_HANDLER(
3651 sci_req,
3652 scic_sds_request_state_handler_table,
3653 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
3654 );
3655}
3656
3657
3658
3659static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
3660 void *object)
3661{
3662 struct scic_sds_request *sci_req = object;
3663
3664 SET_STATE_HANDLER(
3665 sci_req,
3666 scic_sds_request_state_handler_table,
3667 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
3668 );
3669
3670 scic_sds_remote_device_set_working_request(
3671 sci_req->target_device, sci_req
3672 );
3673}
3674
/*
 * Entry action for the soft-reset await-H2D-diagnostic-completion
 * substate.  The first (SRST-asserted) H2D FIS has completed; rewrite
 * the command FIS and task context for the SRST-deasserted phase, then
 * resubmit the IO to the hardware.
 */
static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scu_task_context *task_context;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit (control field of the H2D register FIS) so the
	 * next transmission de-asserts the soft reset.
	 */
	h2d_fis = &sci_req->stp.cmd;
	h2d_fis->control = 0;

	/* Clear the TC control bit in this request's task context before
	 * resubmission.
	 */
	task_context = scic_sds_controller_get_task_context_buffer(
		sci_req->owning_controller, sci_req->io_tag);
	task_context->control_frame = 0;

	/* Hand the request back to the controller; only switch to the
	 * diagnostic-completion handlers if the resubmission was accepted.
	 * NOTE(review): on failure the handlers are left unchanged and the
	 * error is silently dropped — presumably an outer abort/timeout path
	 * recovers the request; confirm before relying on this.
	 */
	status = scic_controller_continue_io(sci_req);
	if (status == SCI_SUCCESS) {
		SET_STATE_HANDLER(
			sci_req,
			scic_sds_request_state_handler_table,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
		);
	}
}
3701
3702static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
3703 void *object)
3704{
3705 struct scic_sds_request *sci_req = object;
3706
3707 SET_STATE_HANDLER(
3708 sci_req,
3709 scic_sds_request_state_handler_table,
3710 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
3711 );
3712}
3713
2395static const struct sci_base_state scic_sds_request_state_table[] = { 3714static const struct sci_base_state scic_sds_request_state_table[] = {
2396 [SCI_BASE_REQUEST_STATE_INITIAL] = { 3715 [SCI_BASE_REQUEST_STATE_INITIAL] = {
2397 .enter_state = scic_sds_request_initial_state_enter, 3716 .enter_state = scic_sds_request_initial_state_enter,
@@ -2401,7 +3720,39 @@ static const struct sci_base_state scic_sds_request_state_table[] = {
2401 }, 3720 },
2402 [SCI_BASE_REQUEST_STATE_STARTED] = { 3721 [SCI_BASE_REQUEST_STATE_STARTED] = {
2403 .enter_state = scic_sds_request_started_state_enter, 3722 .enter_state = scic_sds_request_started_state_enter,
2404 .exit_state = scic_sds_request_started_state_exit 3723 },
3724 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
3725 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
3726 },
3727 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
3728 .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
3729 },
3730 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
3731 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
3732 },
3733 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
3734 .enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
3735 },
3736 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
3737 .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
3738 },
3739 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
3740 .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
3741 },
3742 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
3743 .enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
3744 },
3745 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
3746 .enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
3747 },
3748 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
3749 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3750 },
3751 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
3752 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3753 },
3754 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
3755 .enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
2405 }, 3756 },
2406 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { 3757 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
2407 .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter, 3758 .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter,
@@ -2437,7 +3788,6 @@ static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
2437 sci_req->io_tag = io_tag; 3788 sci_req->io_tag = io_tag;
2438 sci_req->owning_controller = scic; 3789 sci_req->owning_controller = scic;
2439 sci_req->target_device = sci_dev; 3790 sci_req->target_device = sci_dev;
2440 sci_req->has_started_substate_machine = false;
2441 sci_req->protocol = SCIC_NO_PROTOCOL; 3791 sci_req->protocol = SCIC_NO_PROTOCOL;
2442 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 3792 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
2443 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev); 3793 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
@@ -3065,6 +4415,3 @@ int isci_request_execute(
3065 *isci_request = request; 4415 *isci_request = request;
3066 return ret; 4416 return ret;
3067} 4417}
3068
3069
3070